author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-12 17:59:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-12 17:59:53 -0500
commit		87c779baabff157f09db6fce417a7544220f9f00 (patch)
tree		3a11ac4b554a3fe52d331f1f4f72997582e87cac /drivers/dma
parent		eea0cf3fcd3243666e0bb792c560ec128b1f06a1 (diff)
parent		a9507ca3fb90987db5c6cc385885782cb05d4967 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "Main features this time are:

   - BAM v1.3.0 support for qcom bam dma
   - support for Allwinner sun8i dma
   - Atmel's eXtended DMA Controller driver
   - chancnt cleanup by Maxime
   - fixes spread over drivers"

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (56 commits)
  dmaenegine: Delete a check before free_percpu()
  dmaengine: ioatdma: fix dma mapping errors
  dma: cppi41: add a delay while setting the TD bit
  dma: cppi41: wait longer for the HW to return the descriptor
  dmaengine: fsl-edma: fixup reg offset and hw S/G support in big-endian model
  dmaengine: fsl-edma: fix calculation of remaining bytes
  drivers/dma/pch_dma: declare pch_dma_id_table as static
  dmaengine: ste_dma40: fix error return code
  dma: imx-sdma: clarify about firmware not found error
  Documentation: devicetree: Fix Xilinx VDMA specification
  dmaengine: pl330: update author info
  dmaengine: clarify the issue_pending expectations
  dmaengine: at_xdmac: Add DMA_PRIVATE
  ARM: dts: at_xdmac: fix bad value of dma-cells in documentation
  dmaengine: at_xdmac: fix missing spin_unlock
  dmaengine: at_xdmac: fix a bug in transfer residue computation
  dmaengine: at_xdmac: fix software lockup at_xdmac_tx_status()
  dmaengine: at_xdmac: remove chancnt affectation
  dmaengine: at_xdmac: prefer usage of readl/writel_relaxed
  dmaengine: xdmac: fix print warning on dma_addr_t variable
  ...
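The new at_xdmac controller driver added below is driven through the generic dmaengine slave API rather than any driver-specific interface. The following is only an illustrative sketch, not code from this series: the "tx" channel name, fifo_addr, buf_dma/len and the completion hook are assumptions. Note that, per the issue_pending clarification in this pull, nothing starts until dma_async_issue_pending() is called.

#include <linux/dmaengine.h>

/* Assumed completion callback; it runs from the controller driver's tasklet. */
static void example_tx_done(void *param)
{
}

/* Hypothetical client helper; dev, fifo_addr, buf_dma and len are assumed inputs. */
static int example_start_tx(struct device *dev, dma_addr_t fifo_addr,
			    dma_addr_t buf_dma, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* assumed peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	/* Channel comes from the "dmas"/"dma-names" properties of the DT node. */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->callback = example_tx_done;
	dmaengine_submit(desc);

	/* The transfer is only started here, not at submit time. */
	dma_async_issue_pending(chan);
	return 0;
}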
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	11
-rw-r--r--	drivers/dma/Makefile	1
-rw-r--r--	drivers/dma/amba-pl08x.c	2
-rw-r--r--	drivers/dma/at_xdmac.c	1524
-rw-r--r--	drivers/dma/bcm2835-dma.c	3
-rw-r--r--	drivers/dma/cppi41.c	12
-rw-r--r--	drivers/dma/dma-jz4740.c	4
-rw-r--r--	drivers/dma/dmaengine.c	3
-rw-r--r--	drivers/dma/fsl-edma.c	191
-rw-r--r--	drivers/dma/fsldma.c	1
-rw-r--r--	drivers/dma/imx-sdma.c	6
-rw-r--r--	drivers/dma/ioat/dma_v3.c	35
-rw-r--r--	drivers/dma/iop-adma.c	1
-rw-r--r--	drivers/dma/k3dma.c	4
-rw-r--r--	drivers/dma/mmp_pdma.c	1
-rw-r--r--	drivers/dma/mmp_tdma.c	1
-rw-r--r--	drivers/dma/mpc512x_dma.c	13
-rw-r--r--	drivers/dma/nbpfaxi.c	1
-rw-r--r--	drivers/dma/omap-dma.c	2
-rw-r--r--	drivers/dma/pch_dma.c	2
-rw-r--r--	drivers/dma/pl330.c	7
-rw-r--r--	drivers/dma/qcom_bam_dma.c	231
-rw-r--r--	drivers/dma/s3c24xx-dma.c	1
-rw-r--r--	drivers/dma/sa11x0-dma.c	3
-rw-r--r--	drivers/dma/sh/rcar-audmapp.c	3
-rw-r--r--	drivers/dma/sh/rcar-hpbdma.c	3
-rw-r--r--	drivers/dma/sh/shdma-base.c	4
-rw-r--r--	drivers/dma/sh/shdma-of.c	1
-rw-r--r--	drivers/dma/sh/shdmac.c	2
-rw-r--r--	drivers/dma/sh/sudmac.c	3
-rw-r--r--	drivers/dma/sirf-dma.c	5
-rw-r--r--	drivers/dma/ste_dma40.c	1
-rw-r--r--	drivers/dma/sun6i-dma.c	122
-rw-r--r--	drivers/dma/tegra20-apb-dma.c	1
-rw-r--r--	drivers/dma/timb_dma.c	1
-rw-r--r--	drivers/dma/xilinx/xilinx_vdma.c	13
36 files changed, 1953 insertions, 266 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de469821bc1b..f2b2c4e87aef 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -107,6 +107,13 @@ config AT_HDMAC
 	help
 	  Support the Atmel AHB DMA controller.
 
+config AT_XDMAC
+	tristate "Atmel XDMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel XDMA controller.
+
 config FSL_DMA
 	tristate "Freescale Elo series DMA support"
 	depends on FSL_SOC
@@ -395,12 +402,12 @@ config XILINX_VDMA
 
 config DMA_SUN6I
 	tristate "Allwinner A31 SoCs DMA support"
-	depends on MACH_SUN6I || COMPILE_TEST
+	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
 	depends on RESET_CONTROLLER
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Support for the DMA engine for Allwinner A31 SoCs.
+	  Support for the DMA engine first found in Allwinner A31 SoCs.
 
 config NBPFAXI_DMA
 	tristate "Renesas Type-AXI NBPF DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index cb626c179911..2022b5451377 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
+obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE_BASE) += sh/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e34024b000a4..1364d00881dd 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2164,7 +2164,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 			__func__, ret);
 		goto out_no_memcpy;
 	}
-	pl08x->memcpy.chancnt = ret;
 
 	/* Register slave channels */
 	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
@@ -2175,7 +2174,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 			__func__, ret);
 		goto out_no_slave;
 	}
-	pl08x->slave.chancnt = ret;
 
 	ret = dma_async_device_register(&pl08x->memcpy);
 	if (ret) {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
new file mode 100644
index 000000000000..b60d77a22df6
--- /dev/null
+++ b/drivers/dma/at_xdmac.c
@@ -0,0 +1,1524 @@
1/*
2 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
3 *
4 * Copyright (C) 2014 Atmel Corporation
5 *
6 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <asm/barrier.h>
22#include <dt-bindings/dma/at91.h>
23#include <linux/clk.h>
24#include <linux/dmaengine.h>
25#include <linux/dmapool.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/list.h>
29#include <linux/module.h>
30#include <linux/of_dma.h>
31#include <linux/of_platform.h>
32#include <linux/platform_device.h>
33#include <linux/pm.h>
34
35#include "dmaengine.h"
36
37/* Global registers */
38#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
39#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
40#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
41#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
42#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
43#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
44#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
45#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
46#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
47#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
48#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
49#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
50#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
51#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
52#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
53#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
54#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
55#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
56#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */
57#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
58#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
59
60/* Channel relative registers offsets */
61#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
62#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
63#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
64#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
65#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
66#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
67#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
68#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
69#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
70#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
71#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
72#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
73#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
74#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
75#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
76#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
77#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
78#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
79#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
80#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
81#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
82#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
83#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
84#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
85#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
86#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
87#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
88#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
89#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
90#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
91#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
92#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
93#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
94#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
95#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
96#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
97#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
98#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
99#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
100#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
101#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
102#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
103#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
104#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
105#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
106#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
107#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
108#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
109#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
110#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
111#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
112#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
113#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
114#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
115#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
116#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
117#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
118#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
119#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
120#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
121#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
122#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
123#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
124#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
125#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
126#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
127#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
128#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
129#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
130#define AT_XDMAC_CC_DWIDTH_OFFSET 11
131#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
132#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
133#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
134#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
135#define AT_XDMAC_CC_DWIDTH_WORD 0x2
136#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
137#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
138#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
139#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
140#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
141#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
142#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
143#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
144#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
145#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
146#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
147#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
148#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
149#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
150#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
151#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
152#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
153#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
154#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
155#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
156#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
157#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
158#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
159#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
160#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
161#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
162
163#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
164
165/* Microblock control members */
166#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
167#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
168#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
169#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
170#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
171#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
172#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
173#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
174
175#define AT_XDMAC_MAX_CHAN 0x20
176
177enum atc_status {
178 AT_XDMAC_CHAN_IS_CYCLIC = 0,
179 AT_XDMAC_CHAN_IS_PAUSED,
180};
181
182/* ----- Channels ----- */
183struct at_xdmac_chan {
184 struct dma_chan chan;
185 void __iomem *ch_regs;
186 u32 mask; /* Channel Mask */
187 u32 cfg[3]; /* Channel Configuration Register */
188 #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */
189 #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predefined dev to mem channel conf */
190 #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predefined mem to dev channel conf */
191 u8 perid; /* Peripheral ID */
192 u8 perif; /* Peripheral Interface */
193 u8 memif; /* Memory Interface */
194 u32 per_src_addr;
195 u32 per_dst_addr;
196 u32 save_cim;
197 u32 save_cnda;
198 u32 save_cndc;
199 unsigned long status;
200 struct tasklet_struct tasklet;
201
202 spinlock_t lock;
203
204 struct list_head xfers_list;
205 struct list_head free_descs_list;
206};
207
208
209/* ----- Controller ----- */
210struct at_xdmac {
211 struct dma_device dma;
212 void __iomem *regs;
213 int irq;
214 struct clk *clk;
215 u32 save_gim;
216 u32 save_gs;
217 struct dma_pool *at_xdmac_desc_pool;
218 struct at_xdmac_chan chan[0];
219};
220
221
222/* ----- Descriptors ----- */
223
224/* Linked List Descriptor */
225struct at_xdmac_lld {
226 dma_addr_t mbr_nda; /* Next Descriptor Member */
227 u32 mbr_ubc; /* Microblock Control Member */
228 dma_addr_t mbr_sa; /* Source Address Member */
229 dma_addr_t mbr_da; /* Destination Address Member */
230 u32 mbr_cfg; /* Configuration Register */
231};
232
233
234struct at_xdmac_desc {
235 struct at_xdmac_lld lld;
236 enum dma_transfer_direction direction;
237 struct dma_async_tx_descriptor tx_dma_desc;
238 struct list_head desc_node;
239 /* Following members are only used by the first descriptor */
240 bool active_xfer;
241 unsigned int xfer_size;
242 struct list_head descs_list;
243 struct list_head xfer_node;
244};
245
246static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
247{
248 return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
249}
250
251#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
252#define at_xdmac_write(atxdmac, reg, value) \
253 writel_relaxed((value), (atxdmac)->regs + (reg))
254
255#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
256#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
257
258static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
259{
260 return container_of(dchan, struct at_xdmac_chan, chan);
261}
262
263static struct device *chan2dev(struct dma_chan *chan)
264{
265 return &chan->dev->device;
266}
267
268static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
269{
270 return container_of(ddev, struct at_xdmac, dma);
271}
272
273static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
274{
275 return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
276}
277
278static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
279{
280 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
281}
282
283static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
284{
285 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
286}
287
288static inline int at_xdmac_csize(u32 maxburst)
289{
290 int csize;
291
292 csize = ffs(maxburst) - 1;
293 if (csize > 4)
294 csize = -EINVAL;
295
296 return csize;
297};
298
299static inline u8 at_xdmac_get_dwidth(u32 cfg)
300{
301 return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
302};
303
304static unsigned int init_nr_desc_per_channel = 64;
305module_param(init_nr_desc_per_channel, uint, 0644);
306MODULE_PARM_DESC(init_nr_desc_per_channel,
307 "initial descriptors per channel (default: 64)");
308
309
310static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
311{
312 return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
313}
314
315static void at_xdmac_off(struct at_xdmac *atxdmac)
316{
317 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
318
319 /* Wait until all channels are disabled. */
320 while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
321 cpu_relax();
322
323 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
324}
325
326/* Call with lock held. */
327static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
328 struct at_xdmac_desc *first)
329{
330 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
331 u32 reg;
332
333 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
334
335 if (at_xdmac_chan_is_enabled(atchan))
336 return;
337
338 /* Set transfer as active to not try to start it again. */
339 first->active_xfer = true;
340
341 /* Tell xdmac where to get the first descriptor. */
342 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
343 | AT_XDMAC_CNDA_NDAIF(atchan->memif);
344 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
345
346 /*
347 * When doing memory to memory transfer we need to use the next
348 * descriptor view 2 since some fields of the configuration register
349 * depend on transfer size and src/dest addresses.
350 */
351 if (is_slave_direction(first->direction)) {
352 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
353 if (first->direction == DMA_MEM_TO_DEV)
354 atchan->cfg[AT_XDMAC_CUR_CFG] =
355 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
356 else
357 atchan->cfg[AT_XDMAC_CUR_CFG] =
358 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
359 at_xdmac_chan_write(atchan, AT_XDMAC_CC,
360 atchan->cfg[AT_XDMAC_CUR_CFG]);
361 } else {
362 /*
363 * No need to write AT_XDMAC_CC reg, it will be done when the
364 * descriptor is fecthed.
365 */
366 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
367 }
368
369 reg |= AT_XDMAC_CNDC_NDDUP
370 | AT_XDMAC_CNDC_NDSUP
371 | AT_XDMAC_CNDC_NDE;
372 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
373
374 dev_vdbg(chan2dev(&atchan->chan),
375 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
376 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
377 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
378 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
379 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
380 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
381 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
382
383 at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
384 reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
385 /*
386 * There is no end of list when doing cyclic DMA, so we need to get
387 * an interrupt after each period.
388 */
389 if (at_xdmac_chan_is_cyclic(atchan))
390 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
391 reg | AT_XDMAC_CIE_BIE);
392 else
393 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
394 reg | AT_XDMAC_CIE_LIE);
395 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
396 dev_vdbg(chan2dev(&atchan->chan),
397 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
398 wmb();
399 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
400
401 dev_vdbg(chan2dev(&atchan->chan),
402 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
403 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
404 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
405 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
406 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
407 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
408 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
409
410}
411
412static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
413{
414 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
415 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
416 dma_cookie_t cookie;
417
418 spin_lock_bh(&atchan->lock);
419 cookie = dma_cookie_assign(tx);
420
421 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
422 __func__, atchan, desc);
423 list_add_tail(&desc->xfer_node, &atchan->xfers_list);
424 if (list_is_singular(&atchan->xfers_list))
425 at_xdmac_start_xfer(atchan, desc);
426
427 spin_unlock_bh(&atchan->lock);
428 return cookie;
429}
430
431static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
432 gfp_t gfp_flags)
433{
434 struct at_xdmac_desc *desc;
435 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
436 dma_addr_t phys;
437
438 desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
439 if (desc) {
440 memset(desc, 0, sizeof(*desc));
441 INIT_LIST_HEAD(&desc->descs_list);
442 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
443 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
444 desc->tx_dma_desc.phys = phys;
445 }
446
447 return desc;
448}
449
450/* Call must be protected by lock. */
451static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
452{
453 struct at_xdmac_desc *desc;
454
455 if (list_empty(&atchan->free_descs_list)) {
456 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
457 } else {
458 desc = list_first_entry(&atchan->free_descs_list,
459 struct at_xdmac_desc, desc_node);
460 list_del(&desc->desc_node);
461 desc->active_xfer = false;
462 }
463
464 return desc;
465}
466
467static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
468 struct of_dma *of_dma)
469{
470 struct at_xdmac *atxdmac = of_dma->of_dma_data;
471 struct at_xdmac_chan *atchan;
472 struct dma_chan *chan;
473 struct device *dev = atxdmac->dma.dev;
474
475 if (dma_spec->args_count != 1) {
476 dev_err(dev, "dma phandle args: bad number of args\n");
477 return NULL;
478 }
479
480 chan = dma_get_any_slave_channel(&atxdmac->dma);
481 if (!chan) {
482 dev_err(dev, "can't get a dma channel\n");
483 return NULL;
484 }
485
486 atchan = to_at_xdmac_chan(chan);
487 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
488 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
489 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
490 dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
491 atchan->memif, atchan->perif, atchan->perid);
492
493 return chan;
494}
495
496static int at_xdmac_set_slave_config(struct dma_chan *chan,
497 struct dma_slave_config *sconfig)
498{
499 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
500 u8 dwidth;
501 int csize;
502
503 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
504 AT91_XDMAC_DT_PERID(atchan->perid)
505 | AT_XDMAC_CC_DAM_INCREMENTED_AM
506 | AT_XDMAC_CC_SAM_FIXED_AM
507 | AT_XDMAC_CC_DIF(atchan->memif)
508 | AT_XDMAC_CC_SIF(atchan->perif)
509 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
510 | AT_XDMAC_CC_DSYNC_PER2MEM
511 | AT_XDMAC_CC_MBSIZE_SIXTEEN
512 | AT_XDMAC_CC_TYPE_PER_TRAN;
513 csize = at_xdmac_csize(sconfig->src_maxburst);
514 if (csize < 0) {
515 dev_err(chan2dev(chan), "invalid src maxburst value\n");
516 return -EINVAL;
517 }
518 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
519 dwidth = ffs(sconfig->src_addr_width) - 1;
520 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
521
522
523 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
524 AT91_XDMAC_DT_PERID(atchan->perid)
525 | AT_XDMAC_CC_DAM_FIXED_AM
526 | AT_XDMAC_CC_SAM_INCREMENTED_AM
527 | AT_XDMAC_CC_DIF(atchan->perif)
528 | AT_XDMAC_CC_SIF(atchan->memif)
529 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
530 | AT_XDMAC_CC_DSYNC_MEM2PER
531 | AT_XDMAC_CC_MBSIZE_SIXTEEN
532 | AT_XDMAC_CC_TYPE_PER_TRAN;
533 csize = at_xdmac_csize(sconfig->dst_maxburst);
534 if (csize < 0) {
535 dev_err(chan2dev(chan), "invalid dst maxburst value\n");
536 return -EINVAL;
537 }
538 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
539 dwidth = ffs(sconfig->dst_addr_width) - 1;
540 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
541
542 /* Src and dst addr are needed to configure the linked list descriptor. */
543 atchan->per_src_addr = sconfig->src_addr;
544 atchan->per_dst_addr = sconfig->dst_addr;
545
546 dev_dbg(chan2dev(chan),
547 "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
548 __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
549 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
550 atchan->per_src_addr, atchan->per_dst_addr);
551
552 return 0;
553}
554
555static struct dma_async_tx_descriptor *
556at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
557 unsigned int sg_len, enum dma_transfer_direction direction,
558 unsigned long flags, void *context)
559{
560 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
561 struct at_xdmac_desc *first = NULL, *prev = NULL;
562 struct scatterlist *sg;
563 int i;
564 u32 cfg;
565 unsigned int xfer_size = 0;
566
567 if (!sgl)
568 return NULL;
569
570 if (!is_slave_direction(direction)) {
571 dev_err(chan2dev(chan), "invalid DMA direction\n");
572 return NULL;
573 }
574
575 dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
576 __func__, sg_len,
577 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
578 flags);
579
580 /* Protect dma_sconfig field that can be modified by set_slave_conf. */
581 spin_lock_bh(&atchan->lock);
582
583 /* Prepare descriptors. */
584 for_each_sg(sgl, sg, sg_len, i) {
585 struct at_xdmac_desc *desc = NULL;
586 u32 len, mem;
587
588 len = sg_dma_len(sg);
589 mem = sg_dma_address(sg);
590 if (unlikely(!len)) {
591 dev_err(chan2dev(chan), "sg data length is zero\n");
592 spin_unlock_bh(&atchan->lock);
593 return NULL;
594 }
595 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
596 __func__, i, len, mem);
597
598 desc = at_xdmac_get_desc(atchan);
599 if (!desc) {
600 dev_err(chan2dev(chan), "can't get descriptor\n");
601 if (first)
602 list_splice_init(&first->descs_list, &atchan->free_descs_list);
603 spin_unlock_bh(&atchan->lock);
604 return NULL;
605 }
606
607 /* Linked list descriptor setup. */
608 if (direction == DMA_DEV_TO_MEM) {
609 desc->lld.mbr_sa = atchan->per_src_addr;
610 desc->lld.mbr_da = mem;
611 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
612 } else {
613 desc->lld.mbr_sa = mem;
614 desc->lld.mbr_da = atchan->per_dst_addr;
615 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
616 }
617 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */
618 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
619 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
620 | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
621 | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */
622 dev_dbg(chan2dev(chan),
623 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
624 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
625
626 /* Chain lld. */
627 if (prev) {
628 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
629 dev_dbg(chan2dev(chan),
630 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
631 __func__, prev, &prev->lld.mbr_nda);
632 }
633
634 prev = desc;
635 if (!first)
636 first = desc;
637
638 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
639 __func__, desc, first);
640 list_add_tail(&desc->desc_node, &first->descs_list);
641 xfer_size += len;
642 }
643
644 spin_unlock_bh(&atchan->lock);
645
646 first->tx_dma_desc.flags = flags;
647 first->xfer_size = xfer_size;
648 first->direction = direction;
649
650 return &first->tx_dma_desc;
651}
652
653static struct dma_async_tx_descriptor *
654at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
655 size_t buf_len, size_t period_len,
656 enum dma_transfer_direction direction,
657 unsigned long flags)
658{
659 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
660 struct at_xdmac_desc *first = NULL, *prev = NULL;
661 unsigned int periods = buf_len / period_len;
662 int i;
663 u32 cfg;
664
665 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
666 __func__, &buf_addr, buf_len, period_len,
667 direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
668
669 if (!is_slave_direction(direction)) {
670 dev_err(chan2dev(chan), "invalid DMA direction\n");
671 return NULL;
672 }
673
674 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
675 dev_err(chan2dev(chan), "channel currently used\n");
676 return NULL;
677 }
678
679 for (i = 0; i < periods; i++) {
680 struct at_xdmac_desc *desc = NULL;
681
682 spin_lock_bh(&atchan->lock);
683 desc = at_xdmac_get_desc(atchan);
684 if (!desc) {
685 dev_err(chan2dev(chan), "can't get descriptor\n");
686 if (first)
687 list_splice_init(&first->descs_list, &atchan->free_descs_list);
688 spin_unlock_bh(&atchan->lock);
689 return NULL;
690 }
691 spin_unlock_bh(&atchan->lock);
692 dev_dbg(chan2dev(chan),
693 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
694 __func__, desc, &desc->tx_dma_desc.phys);
695
696 if (direction == DMA_DEV_TO_MEM) {
697 desc->lld.mbr_sa = atchan->per_src_addr;
698 desc->lld.mbr_da = buf_addr + i * period_len;
699 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
700 } else {
701 desc->lld.mbr_sa = buf_addr + i * period_len;
702 desc->lld.mbr_da = atchan->per_dst_addr;
703 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
704 }
705 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
706 | AT_XDMAC_MBR_UBC_NDEN
707 | AT_XDMAC_MBR_UBC_NSEN
708 | AT_XDMAC_MBR_UBC_NDE
709 | period_len >> at_xdmac_get_dwidth(cfg);
710
711 dev_dbg(chan2dev(chan),
712 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
713 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
714
715 /* Chain lld. */
716 if (prev) {
717 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
718 dev_dbg(chan2dev(chan),
719 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
720 __func__, prev, &prev->lld.mbr_nda);
721 }
722
723 prev = desc;
724 if (!first)
725 first = desc;
726
727 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
728 __func__, desc, first);
729 list_add_tail(&desc->desc_node, &first->descs_list);
730 }
731
732 prev->lld.mbr_nda = first->tx_dma_desc.phys;
733 dev_dbg(chan2dev(chan),
734 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
735 __func__, prev, &prev->lld.mbr_nda);
736 first->tx_dma_desc.flags = flags;
737 first->xfer_size = buf_len;
738 first->direction = direction;
739
740 return &first->tx_dma_desc;
741}
742
743static struct dma_async_tx_descriptor *
744at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
745 size_t len, unsigned long flags)
746{
747 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
748 struct at_xdmac_desc *first = NULL, *prev = NULL;
749 size_t remaining_size = len, xfer_size = 0, ublen;
750 dma_addr_t src_addr = src, dst_addr = dest;
751 u32 dwidth;
752 /*
753 * WARNING: the transfer direction is unknown, which means we cannot
754 * dynamically set the source and dest interfaces, so we have to use
755 * the same one for both. Only interface 0 allows EBI access.
756 * Fortunately we can access DDR through both ports (at least on
757 * SAMA5D4x), so using the same interface for source and dest works
758 * around not knowing the direction.
759 */
760 u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
761 | AT_XDMAC_CC_SAM_INCREMENTED_AM
762 | AT_XDMAC_CC_DIF(0)
763 | AT_XDMAC_CC_SIF(0)
764 | AT_XDMAC_CC_MBSIZE_SIXTEEN
765 | AT_XDMAC_CC_TYPE_MEM_TRAN;
766
767 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
768 __func__, &src, &dest, len, flags);
769
770 if (unlikely(!len))
771 return NULL;
772
773 /*
774 * Check address alignment to select the greatest data width we can use.
775 * Some XDMAC implementations don't provide dword transfers; in that
776 * case selecting dword behaves the same as selecting word transfers.
777 */
778 if (!((src_addr | dst_addr) & 7)) {
779 dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
780 dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
781 } else if (!((src_addr | dst_addr) & 3)) {
782 dwidth = AT_XDMAC_CC_DWIDTH_WORD;
783 dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
784 } else if (!((src_addr | dst_addr) & 1)) {
785 dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
786 dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
787 } else {
788 dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
789 dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
790 }
791
792 /* Prepare descriptors. */
793 while (remaining_size) {
794 struct at_xdmac_desc *desc = NULL;
795
796 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
797
798 spin_lock_bh(&atchan->lock);
799 desc = at_xdmac_get_desc(atchan);
800 spin_unlock_bh(&atchan->lock);
801 if (!desc) {
802 dev_err(chan2dev(chan), "can't get descriptor\n");
803 if (first)
804 list_splice_init(&first->descs_list, &atchan->free_descs_list);
805 return NULL;
806 }
807
808 /* Update src and dest addresses. */
809 src_addr += xfer_size;
810 dst_addr += xfer_size;
811
812 if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
813 xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
814 else
815 xfer_size = remaining_size;
816
817 dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
818
819 /* Check remaining length and change data width if needed. */
820 if (!((src_addr | dst_addr | xfer_size) & 7)) {
821 dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
822 dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
823 } else if (!((src_addr | dst_addr | xfer_size) & 3)) {
824 dwidth = AT_XDMAC_CC_DWIDTH_WORD;
825 dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
826 } else if (!((src_addr | dst_addr | xfer_size) & 1)) {
827 dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
828 dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
829 } else if ((src_addr | dst_addr | xfer_size) & 1) {
830 dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
831 dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
832 }
833 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
834
835 ublen = xfer_size >> dwidth;
836 remaining_size -= xfer_size;
837
838 desc->lld.mbr_sa = src_addr;
839 desc->lld.mbr_da = dst_addr;
840 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
841 | AT_XDMAC_MBR_UBC_NDEN
842 | AT_XDMAC_MBR_UBC_NSEN
843 | (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
844 | ublen;
845 desc->lld.mbr_cfg = chan_cc;
846
847 dev_dbg(chan2dev(chan),
848 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
849 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
850
851 /* Chain lld. */
852 if (prev) {
853 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
854 dev_dbg(chan2dev(chan),
855 "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
856 __func__, prev, prev->lld.mbr_nda);
857 }
858
859 prev = desc;
860 if (!first)
861 first = desc;
862
863 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
864 __func__, desc, first);
865 list_add_tail(&desc->desc_node, &first->descs_list);
866 }
867
868 first->tx_dma_desc.flags = flags;
869 first->xfer_size = len;
870
871 return &first->tx_dma_desc;
872}
873
874static enum dma_status
875at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
876 struct dma_tx_state *txstate)
877{
878 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
879 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
880 struct at_xdmac_desc *desc, *_desc;
881 struct list_head *descs_list;
882 enum dma_status ret;
883 int residue;
884 u32 cur_nda, mask, value;
885 u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
886
887 ret = dma_cookie_status(chan, cookie, txstate);
888 if (ret == DMA_COMPLETE)
889 return ret;
890
891 if (!txstate)
892 return ret;
893
894 spin_lock_bh(&atchan->lock);
895
896 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
897
898 /*
899 * If the transfer has not been started yet, there is no need to compute
900 * the residue; it is simply the transfer length.
901 */
902 if (!desc->active_xfer) {
903 dma_set_residue(txstate, desc->xfer_size);
904 spin_unlock_bh(&atchan->lock);
905 return ret;
906 }
907
908 residue = desc->xfer_size;
909 /*
910 * Flush FIFO: only relevant when the transfer is source peripheral
911 * synchronized.
912 */
913 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
914 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
915 if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
916 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
917 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
918 cpu_relax();
919 }
920
921 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
922 /*
923 * Subtract the size of all microblocks already transferred, including
924 * the current one, then add back the remaining size of the current
925 * microblock.
926 */
927 descs_list = &desc->descs_list;
928 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
929 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
930 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
931 break;
932 }
933 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
934
935 spin_unlock_bh(&atchan->lock);
936
937 dma_set_residue(txstate, residue);
938
939 dev_dbg(chan2dev(chan),
940 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
941 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
942
943 return ret;
944}
945
946/* Call must be protected by lock. */
947static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
948 struct at_xdmac_desc *desc)
949{
950 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
951
952 /*
953 * Remove the transfer from the transfer list then move the transfer
954 * descriptors into the free descriptors list.
955 */
956 list_del(&desc->xfer_node);
957 list_splice_init(&desc->descs_list, &atchan->free_descs_list);
958}
959
960static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
961{
962 struct at_xdmac_desc *desc;
963
964 spin_lock_bh(&atchan->lock);
965
966 /*
967 * If the channel is enabled, do nothing; advance_work will be
968 * triggered again from the interrupt handler.
969 */
970 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
971 desc = list_first_entry(&atchan->xfers_list,
972 struct at_xdmac_desc,
973 xfer_node);
974 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
975 if (!desc->active_xfer)
976 at_xdmac_start_xfer(atchan, desc);
977 }
978
979 spin_unlock_bh(&atchan->lock);
980}
981
982static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
983{
984 struct at_xdmac_desc *desc;
985 struct dma_async_tx_descriptor *txd;
986
987 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
988 txd = &desc->tx_dma_desc;
989
990 if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
991 txd->callback(txd->callback_param);
992}
993
994static void at_xdmac_tasklet(unsigned long data)
995{
996 struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
997 struct at_xdmac_desc *desc;
998 u32 error_mask;
999
1000 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
1001 __func__, atchan->status);
1002
1003 error_mask = AT_XDMAC_CIS_RBEIS
1004 | AT_XDMAC_CIS_WBEIS
1005 | AT_XDMAC_CIS_ROIS;
1006
1007 if (at_xdmac_chan_is_cyclic(atchan)) {
1008 at_xdmac_handle_cyclic(atchan);
1009 } else if ((atchan->status & AT_XDMAC_CIS_LIS)
1010 || (atchan->status & error_mask)) {
1011 struct dma_async_tx_descriptor *txd;
1012
1013 if (atchan->status & AT_XDMAC_CIS_RBEIS)
1014 dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1015 if (atchan->status & AT_XDMAC_CIS_WBEIS)
1016 dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1017 if (atchan->status & AT_XDMAC_CIS_ROIS)
1018 dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1019
1020 spin_lock_bh(&atchan->lock);
1021 desc = list_first_entry(&atchan->xfers_list,
1022 struct at_xdmac_desc,
1023 xfer_node);
1024 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1025 BUG_ON(!desc->active_xfer);
1026
1027 txd = &desc->tx_dma_desc;
1028
1029 at_xdmac_remove_xfer(atchan, desc);
1030 spin_unlock_bh(&atchan->lock);
1031
1032 if (!at_xdmac_chan_is_cyclic(atchan)) {
1033 dma_cookie_complete(txd);
1034 if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
1035 txd->callback(txd->callback_param);
1036 }
1037
1038 dma_run_dependencies(txd);
1039
1040 at_xdmac_advance_work(atchan);
1041 }
1042}
1043
1044static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1045{
1046 struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1047 struct at_xdmac_chan *atchan;
1048 u32 imr, status, pending;
1049 u32 chan_imr, chan_status;
1050 int i, ret = IRQ_NONE;
1051
1052 do {
1053 imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1054 status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1055 pending = status & imr;
1056
1057 dev_vdbg(atxdmac->dma.dev,
1058 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1059 __func__, status, imr, pending);
1060
1061 if (!pending)
1062 break;
1063
1064 /* We have to find which channel has generated the interrupt. */
1065 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1066 if (!((1 << i) & pending))
1067 continue;
1068
1069 atchan = &atxdmac->chan[i];
1070 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1071 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1072 atchan->status = chan_status & chan_imr;
1073 dev_vdbg(atxdmac->dma.dev,
1074 "%s: chan%d: imr=0x%x, status=0x%x\n",
1075 __func__, i, chan_imr, chan_status);
1076 dev_vdbg(chan2dev(&atchan->chan),
1077 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1078 __func__,
1079 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1080 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1081 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1082 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1083 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1084 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1085
1086 if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1087 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1088
1089 tasklet_schedule(&atchan->tasklet);
1090 ret = IRQ_HANDLED;
1091 }
1092
1093 } while (pending);
1094
1095 return ret;
1096}
1097
1098static void at_xdmac_issue_pending(struct dma_chan *chan)
1099{
1100 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1101
1102 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1103
1104 if (!at_xdmac_chan_is_cyclic(atchan))
1105 at_xdmac_advance_work(atchan);
1106
1107 return;
1108}
1109
1110static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1111 unsigned long arg)
1112{
1113 struct at_xdmac_desc *desc, *_desc;
1114 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1115 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1116 int ret = 0;
1117
1118 dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
1119
1120 spin_lock_bh(&atchan->lock);
1121
1122 switch (cmd) {
1123 case DMA_PAUSE:
1124 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1125 set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1126 break;
1127
1128 case DMA_RESUME:
1129 if (!at_xdmac_chan_is_paused(atchan))
1130 break;
1131
1132 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1133 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1134 break;
1135
1136 case DMA_TERMINATE_ALL:
1137 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1138 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1139 cpu_relax();
1140
1141 /* Cancel all pending transfers. */
1142 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1143 at_xdmac_remove_xfer(atchan, desc);
1144
1145 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1146 break;
1147
1148 case DMA_SLAVE_CONFIG:
1149 ret = at_xdmac_set_slave_config(chan,
1150 (struct dma_slave_config *)arg);
1151 break;
1152
1153 default:
1154 dev_err(chan2dev(chan),
1155 "unmanaged or unknown dma control cmd: %d\n", cmd);
1156 ret = -ENXIO;
1157 }
1158
1159 spin_unlock_bh(&atchan->lock);
1160
1161 return ret;
1162}
1163
1164static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1165{
1166 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1167 struct at_xdmac_desc *desc;
1168 int i;
1169
1170 spin_lock_bh(&atchan->lock);
1171
1172 if (at_xdmac_chan_is_enabled(atchan)) {
1173 dev_err(chan2dev(chan),
1174 "can't allocate channel resources (channel enabled)\n");
1175 i = -EIO;
1176 goto spin_unlock;
1177 }
1178
1179 if (!list_empty(&atchan->free_descs_list)) {
1180 dev_err(chan2dev(chan),
1181 "can't allocate channel resources (channel not free from a previous use)\n");
1182 i = -EIO;
1183 goto spin_unlock;
1184 }
1185
1186 for (i = 0; i < init_nr_desc_per_channel; i++) {
1187 desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
1188 if (!desc) {
1189 dev_warn(chan2dev(chan),
1190 "only %d descriptors have been allocated\n", i);
1191 break;
1192 }
1193 list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1194 }
1195
1196 dma_cookie_init(chan);
1197
1198 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1199
1200spin_unlock:
1201 spin_unlock_bh(&atchan->lock);
1202 return i;
1203}
1204
1205static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1206{
1207 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1208 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
1209 struct at_xdmac_desc *desc, *_desc;
1210
1211 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1212 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1213 list_del(&desc->desc_node);
1214 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1215 }
1216
1217 return;
1218}
1219
1220#define AT_XDMAC_DMA_BUSWIDTHS\
1221 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
1222 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
1223 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
1224 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
1225 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
1226
1227static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
1228 struct dma_slave_caps *caps)
1229{
1230
1231 caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1232 caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1233 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1234 caps->cmd_pause = true;
1235 caps->cmd_terminate = true;
1236 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1237
1238 return 0;
1239}
1240
1241#ifdef CONFIG_PM
1242static int atmel_xdmac_prepare(struct device *dev)
1243{
1244 struct platform_device *pdev = to_platform_device(dev);
1245 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1246 struct dma_chan *chan, *_chan;
1247
1248 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1249 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1250
1251 /* Wait for transfer completion, except in cyclic case. */
1252 if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1253 return -EAGAIN;
1254 }
1255 return 0;
1256}
1257#else
1258# define atmel_xdmac_prepare NULL
1259#endif
1260
1261#ifdef CONFIG_PM_SLEEP
1262static int atmel_xdmac_suspend(struct device *dev)
1263{
1264 struct platform_device *pdev = to_platform_device(dev);
1265 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1266 struct dma_chan *chan, *_chan;
1267
1268 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1269 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1270
1271 if (at_xdmac_chan_is_cyclic(atchan)) {
1272 if (!at_xdmac_chan_is_paused(atchan))
1273 at_xdmac_control(chan, DMA_PAUSE, 0);
1274 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1275 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1276 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1277 }
1278 }
1279 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1280
1281 at_xdmac_off(atxdmac);
1282 clk_disable_unprepare(atxdmac->clk);
1283 return 0;
1284}
1285
1286static int atmel_xdmac_resume(struct device *dev)
1287{
1288 struct platform_device *pdev = to_platform_device(dev);
1289 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1290 struct at_xdmac_chan *atchan;
1291 struct dma_chan *chan, *_chan;
1292 int i;
1293 u32 cfg;
1294
1295 clk_prepare_enable(atxdmac->clk);
1296
1297 /* Clear pending interrupts. */
1298 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1299 atchan = &atxdmac->chan[i];
1300 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1301 cpu_relax();
1302 }
1303
1304 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
1305 at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
1306 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1307 atchan = to_at_xdmac_chan(chan);
1308 cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
1309 at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
1310 if (at_xdmac_chan_is_cyclic(atchan)) {
1311 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1312 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1313 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1314 wmb();
1315 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
1316 }
1317 }
1318 return 0;
1319}
1320#endif /* CONFIG_PM_SLEEP */
1321
1322static int at_xdmac_probe(struct platform_device *pdev)
1323{
1324 struct resource *res;
1325 struct at_xdmac *atxdmac;
1326 int irq, size, nr_channels, i, ret;
1327 void __iomem *base;
1328 u32 reg;
1329
1330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1331 if (!res)
1332 return -EINVAL;
1333
1334 irq = platform_get_irq(pdev, 0);
1335 if (irq < 0)
1336 return irq;
1337
1338 base = devm_ioremap_resource(&pdev->dev, res);
1339 if (IS_ERR(base))
1340 return PTR_ERR(base);
1341
1342 /*
1343 * Read the number of xdmac channels; the read helper can't be used
1344 * since atxdmac is not yet allocated and we need to know the number
1345 * of channels to do the allocation.
1346 */
1347 reg = readl_relaxed(base + AT_XDMAC_GTYPE);
1348 nr_channels = AT_XDMAC_NB_CH(reg);
1349 if (nr_channels > AT_XDMAC_MAX_CHAN) {
1350 dev_err(&pdev->dev, "invalid number of channels (%u)\n",
1351 nr_channels);
1352 return -EINVAL;
1353 }
1354
1355 size = sizeof(*atxdmac);
1356 size += nr_channels * sizeof(struct at_xdmac_chan);
1357 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1358 if (!atxdmac) {
1359 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
1360 return -ENOMEM;
1361 }
1362
1363 atxdmac->regs = base;
1364 atxdmac->irq = irq;
1365
1366 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
1367 if (IS_ERR(atxdmac->clk)) {
1368 dev_err(&pdev->dev, "can't get dma_clk\n");
1369 return PTR_ERR(atxdmac->clk);
1370 }
1371
1372 /* Do not use dev res to prevent races with tasklet */
1373 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
1374 if (ret) {
1375 dev_err(&pdev->dev, "can't request irq\n");
1376 return ret;
1377 }
1378
1379 ret = clk_prepare_enable(atxdmac->clk);
1380 if (ret) {
1381 dev_err(&pdev->dev, "can't prepare or enable clock\n");
1382 goto err_free_irq;
1383 }
1384
1385 atxdmac->at_xdmac_desc_pool =
1386 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
1387 sizeof(struct at_xdmac_desc), 4, 0);
1388 if (!atxdmac->at_xdmac_desc_pool) {
1389 dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
1390 ret = -ENOMEM;
1391 goto err_clk_disable;
1392 }
1393
1394 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
1395 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
1396 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
1397 /*
1398 * Without DMA_PRIVATE the driver is not able to allocate more than
1399 * one channel; the second allocation fails in private_candidate.
1400 */
1401 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
1402 atxdmac->dma.dev = &pdev->dev;
1403 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
1404 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
1405 atxdmac->dma.device_tx_status = at_xdmac_tx_status;
1406 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
1407 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
1408 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
1409 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
1410 atxdmac->dma.device_control = at_xdmac_control;
1411 atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps;
1412
1413 /* Disable all chans and interrupts. */
1414 at_xdmac_off(atxdmac);
1415
1416 /* Init channels. */
1417 INIT_LIST_HEAD(&atxdmac->dma.channels);
1418 for (i = 0; i < nr_channels; i++) {
1419 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
1420
1421 atchan->chan.device = &atxdmac->dma;
1422 list_add_tail(&atchan->chan.device_node,
1423 &atxdmac->dma.channels);
1424
1425 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
1426 atchan->mask = 1 << i;
1427
1428 spin_lock_init(&atchan->lock);
1429 INIT_LIST_HEAD(&atchan->xfers_list);
1430 INIT_LIST_HEAD(&atchan->free_descs_list);
1431 tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
1432 (unsigned long)atchan);
1433
1434 /* Clear pending interrupts. */
1435 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1436 cpu_relax();
1437 }
1438 platform_set_drvdata(pdev, atxdmac);
1439
1440 ret = dma_async_device_register(&atxdmac->dma);
1441 if (ret) {
1442 dev_err(&pdev->dev, "fail to register DMA engine device\n");
1443 goto err_clk_disable;
1444 }
1445
1446 ret = of_dma_controller_register(pdev->dev.of_node,
1447 at_xdmac_xlate, atxdmac);
1448 if (ret) {
1449 dev_err(&pdev->dev, "could not register of dma controller\n");
1450 goto err_dma_unregister;
1451 }
1452
1453 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
1454 nr_channels, atxdmac->regs);
1455
1456 return 0;
1457
1458err_dma_unregister:
1459 dma_async_device_unregister(&atxdmac->dma);
1460err_clk_disable:
1461 clk_disable_unprepare(atxdmac->clk);
1462err_free_irq:
1463 free_irq(atxdmac->irq, atxdmac->dma.dev);
1464 return ret;
1465}
1466
1467static int at_xdmac_remove(struct platform_device *pdev)
1468{
1469 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
1470 int i;
1471
1472 at_xdmac_off(atxdmac);
1473 of_dma_controller_free(pdev->dev.of_node);
1474 dma_async_device_unregister(&atxdmac->dma);
1475 clk_disable_unprepare(atxdmac->clk);
1476
1477 synchronize_irq(atxdmac->irq);
1478
1479 free_irq(atxdmac->irq, atxdmac->dma.dev);
1480
1481 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1482 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
1483
1484 tasklet_kill(&atchan->tasklet);
1485 at_xdmac_free_chan_resources(&atchan->chan);
1486 }
1487
1488 return 0;
1489}
1490
1491static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
1492 .prepare = atmel_xdmac_prepare,
1493 SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
1494};
1495
1496static const struct of_device_id atmel_xdmac_dt_ids[] = {
1497 {
1498 .compatible = "atmel,sama5d4-dma",
1499 }, {
1500 /* sentinel */
1501 }
1502};
1503MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
1504
1505static struct platform_driver at_xdmac_driver = {
1506 .probe = at_xdmac_probe,
1507 .remove = at_xdmac_remove,
1508 .driver = {
1509 .name = "at_xdmac",
1510 .owner = THIS_MODULE,
1511 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
1512 .pm = &atmel_xdmac_dev_pm_ops,
1513 }
1514};
1515
1516static int __init at_xdmac_init(void)
1517{
1518 return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
1519}
1520subsys_initcall(at_xdmac_init);
1521
1522MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
1523MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
1524MODULE_LICENSE("GPL");
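
[Editor's note] The at_xdmac probe above reads the channel count from the GTYPE register and then makes one devm_kzalloc() covering the controller struct plus its trailing per-channel array (size = sizeof(*atxdmac) + nr_channels * sizeof(struct at_xdmac_chan)). The following is a minimal user-space sketch of that sizing pattern only; demo_ctrl, demo_chan and demo_ctrl_alloc are hypothetical names, not the driver's types.

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for the controller and per-channel state. */
	struct demo_chan { unsigned int mask; };
	struct demo_ctrl {
		unsigned int nr_channels;
		struct demo_chan chan[];	/* trailing per-channel array */
	};

	/* One allocation covers the controller struct and all channels. */
	static struct demo_ctrl *demo_ctrl_alloc(unsigned int nr_channels)
	{
		size_t size = sizeof(struct demo_ctrl) +
			      nr_channels * sizeof(struct demo_chan);
		struct demo_ctrl *ctrl = calloc(1, size);

		if (!ctrl)
			return NULL;
		ctrl->nr_channels = nr_channels;
		for (unsigned int i = 0; i < nr_channels; i++)
			ctrl->chan[i].mask = 1u << i;	/* like atchan->mask = 1 << i */
		return ctrl;
	}

	int main(void)
	{
		struct demo_ctrl *ctrl = demo_ctrl_alloc(16);

		if (!ctrl)
			return 1;
		printf("channel 3 mask: 0x%x\n", ctrl->chan[3].mask);
		free(ctrl);
		return 0;
	}

Keeping the channels in the same allocation as the controller keeps teardown trivial: one free (or, in the driver, one device-managed allocation) releases everything.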
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 68007974961a..918b7b3f766f 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -525,8 +525,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
525 vchan_init(&c->vc, &d->ddev); 525 vchan_init(&c->vc, &d->ddev);
526 INIT_LIST_HEAD(&c->node); 526 INIT_LIST_HEAD(&c->node);
527 527
528 d->ddev.chancnt++;
529
530 c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); 528 c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
531 c->ch = chan_id; 529 c->ch = chan_id;
532 c->irq_number = irq; 530 c->irq_number = irq;
@@ -694,7 +692,6 @@ static struct platform_driver bcm2835_dma_driver = {
694 .remove = bcm2835_dma_remove, 692 .remove = bcm2835_dma_remove,
695 .driver = { 693 .driver = {
696 .name = "bcm2835-dma", 694 .name = "bcm2835-dma",
697 .owner = THIS_MODULE,
698 .of_match_table = of_match_ptr(bcm2835_dma_of_match), 695 .of_match_table = of_match_ptr(bcm2835_dma_of_match),
699 }, 696 },
700}; 697};
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index a58eec3b2cad..b743adf56465 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -1,3 +1,4 @@
1#include <linux/delay.h>
1#include <linux/dmaengine.h> 2#include <linux/dmaengine.h>
2#include <linux/dma-mapping.h> 3#include <linux/dma-mapping.h>
3#include <linux/platform_device.h> 4#include <linux/platform_device.h>
@@ -567,7 +568,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
567 reg |= GCR_TEARDOWN; 568 reg |= GCR_TEARDOWN;
568 cppi_writel(reg, c->gcr_reg); 569 cppi_writel(reg, c->gcr_reg);
569 c->td_queued = 1; 570 c->td_queued = 1;
570 c->td_retry = 100; 571 c->td_retry = 500;
571 } 572 }
572 573
573 if (!c->td_seen || !c->td_desc_seen) { 574 if (!c->td_seen || !c->td_desc_seen) {
@@ -603,12 +604,16 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
603 * descriptor before the TD we fetch it from enqueue, it has to be 604 * descriptor before the TD we fetch it from enqueue, it has to be
604 * there waiting for us. 605 * there waiting for us.
605 */ 606 */
606 if (!c->td_seen && c->td_retry) 607 if (!c->td_seen && c->td_retry) {
608 udelay(1);
607 return -EAGAIN; 609 return -EAGAIN;
608 610 }
609 WARN_ON(!c->td_retry); 611 WARN_ON(!c->td_retry);
612
610 if (!c->td_desc_seen) { 613 if (!c->td_desc_seen) {
611 desc_phys = cppi41_pop_desc(cdd, c->q_num); 614 desc_phys = cppi41_pop_desc(cdd, c->q_num);
615 if (!desc_phys)
616 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
612 WARN_ON(!desc_phys); 617 WARN_ON(!desc_phys);
613 } 618 }
614 619
@@ -1088,7 +1093,6 @@ static struct platform_driver cpp41_dma_driver = {
1088 .remove = cppi41_dma_remove, 1093 .remove = cppi41_dma_remove,
1089 .driver = { 1094 .driver = {
1090 .name = "cppi41-dma-engine", 1095 .name = "cppi41-dma-engine",
1091 .owner = THIS_MODULE,
1092 .pm = &cppi41_pm_ops, 1096 .pm = &cppi41_pm_ops,
1093 .of_match_table = of_match_ptr(cppi41_dma_ids), 1097 .of_match_table = of_match_ptr(cppi41_dma_ids),
1094 }, 1098 },
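
[Editor's note] The cppi41 change raises the teardown retry budget from 100 to 500 and adds a udelay(1) before returning -EAGAIN, so the hardware gets real time to hand back the teardown descriptor instead of being polled back-to-back. Below is a rough user-space sketch of that bounded poll-with-delay idea, folded into a single loop for brevity; poll_td_done() and the timing values are illustrative, not the driver's code (the real path returns -EAGAIN to its caller on each attempt rather than looping internally).

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	#define TD_RETRIES	500	/* mirrors the enlarged c->td_retry budget */

	/* Stand-in for "has the hardware returned the teardown descriptor?" */
	static bool poll_td_done(int attempt)
	{
		return attempt > 42;	/* pretend completion arrives eventually */
	}

	/* Retry a condition with a short delay per attempt instead of spinning. */
	static int wait_for_teardown(void)
	{
		for (int attempt = 0; attempt < TD_RETRIES; attempt++) {
			if (poll_td_done(attempt))
				return 0;
			usleep(1);	/* analogous to udelay(1) between polls */
		}
		return -1;		/* budget exhausted, caller handles the error */
	}

	int main(void)
	{
		printf("teardown %s\n", wait_for_teardown() ? "timed out" : "complete");
		return 0;
	}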
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index ae2ab14e64b3..bdeafeefa5f6 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -563,10 +563,9 @@ static int jz4740_dma_probe(struct platform_device *pdev)
563 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; 563 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
564 dd->device_control = jz4740_dma_control; 564 dd->device_control = jz4740_dma_control;
565 dd->dev = &pdev->dev; 565 dd->dev = &pdev->dev;
566 dd->chancnt = JZ_DMA_NR_CHANS;
567 INIT_LIST_HEAD(&dd->channels); 566 INIT_LIST_HEAD(&dd->channels);
568 567
569 for (i = 0; i < dd->chancnt; i++) { 568 for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
570 chan = &dmadev->chan[i]; 569 chan = &dmadev->chan[i];
571 chan->id = i; 570 chan->id = i;
572 chan->vchan.desc_free = jz4740_dma_desc_free; 571 chan->vchan.desc_free = jz4740_dma_desc_free;
@@ -608,7 +607,6 @@ static struct platform_driver jz4740_dma_driver = {
608 .remove = jz4740_dma_remove, 607 .remove = jz4740_dma_remove,
609 .driver = { 608 .driver = {
610 .name = "jz4740-dma", 609 .name = "jz4740-dma",
611 .owner = THIS_MODULE,
612 }, 610 },
613}; 611};
614module_platform_driver(jz4740_dma_driver); 612module_platform_driver(jz4740_dma_driver);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24bfaf0b92ba..e057935e3023 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -330,8 +330,7 @@ static int __init dma_channel_table_init(void)
330 if (err) { 330 if (err) {
331 pr_err("initialization failure\n"); 331 pr_err("initialization failure\n");
332 for_each_dma_cap_mask(cap, dma_cap_mask_all) 332 for_each_dma_cap_mask(cap, dma_cap_mask_all)
333 if (channel_table[cap]) 333 free_percpu(channel_table[cap]);
334 free_percpu(channel_table[cap]);
335 } 334 }
336 335
337 return err; 336 return err;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 3c5711d5fe97..6fb2e902b459 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -118,17 +118,17 @@
118 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) 118 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
119 119
120struct fsl_edma_hw_tcd { 120struct fsl_edma_hw_tcd {
121 u32 saddr; 121 __le32 saddr;
122 u16 soff; 122 __le16 soff;
123 u16 attr; 123 __le16 attr;
124 u32 nbytes; 124 __le32 nbytes;
125 u32 slast; 125 __le32 slast;
126 u32 daddr; 126 __le32 daddr;
127 u16 doff; 127 __le16 doff;
128 u16 citer; 128 __le16 citer;
129 u32 dlast_sga; 129 __le32 dlast_sga;
130 u16 csr; 130 __le16 csr;
131 u16 biter; 131 __le16 biter;
132}; 132};
133 133
134struct fsl_edma_sw_tcd { 134struct fsl_edma_sw_tcd {
@@ -175,18 +175,12 @@ struct fsl_edma_engine {
175}; 175};
176 176
177/* 177/*
178 * R/W functions for big- or little-endian registers 178 * R/W functions for big- or little-endian registers:
179 * the eDMA controller's endian is independent of the CPU core's endian. 179 * The eDMA controller's endian is independent of the CPU core's endian.
180 * For the big-endian IP module, the offset for 8-bit or 16-bit registers
181 * should also be swapped opposite to that in little-endian IP.
180 */ 182 */
181 183
182static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
183{
184 if (edma->big_endian)
185 return ioread16be(addr);
186 else
187 return ioread16(addr);
188}
189
190static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) 184static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
191{ 185{
192 if (edma->big_endian) 186 if (edma->big_endian)
@@ -197,13 +191,18 @@ static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
197 191
198static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr) 192static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
199{ 193{
200 iowrite8(val, addr); 194 /* swap the reg offset for these in big-endian mode */
195 if (edma->big_endian)
196 iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
197 else
198 iowrite8(val, addr);
201} 199}
202 200
203static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr) 201static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
204{ 202{
203 /* swap the reg offset for these in big-endian mode */
205 if (edma->big_endian) 204 if (edma->big_endian)
206 iowrite16be(val, addr); 205 iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
207 else 206 else
208 iowrite16(val, addr); 207 iowrite16(val, addr);
209} 208}
@@ -254,13 +253,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
254 chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; 253 chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
255 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; 254 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
256 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; 255 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
256 slot = EDMAMUX_CHCFG_SOURCE(slot);
257 257
258 if (enable) 258 if (enable)
259 edma_writeb(fsl_chan->edma, 259 iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
260 EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
261 muxaddr + ch_off);
262 else 260 else
263 edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off); 261 iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
264} 262}
265 263
266static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width) 264static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
@@ -286,9 +284,8 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
286 284
287 fsl_desc = to_fsl_edma_desc(vdesc); 285 fsl_desc = to_fsl_edma_desc(vdesc);
288 for (i = 0; i < fsl_desc->n_tcds; i++) 286 for (i = 0; i < fsl_desc->n_tcds; i++)
289 dma_pool_free(fsl_desc->echan->tcd_pool, 287 dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
290 fsl_desc->tcd[i].vtcd, 288 fsl_desc->tcd[i].ptcd);
291 fsl_desc->tcd[i].ptcd);
292 kfree(fsl_desc); 289 kfree(fsl_desc);
293} 290}
294 291
@@ -363,8 +360,8 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
363 360
364 /* calculate the total size in this desc */ 361 /* calculate the total size in this desc */
365 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) 362 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
366 len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes)) 363 len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
367 * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter)); 364 * le16_to_cpu(edesc->tcd[i].vtcd->biter);
368 365
369 if (!in_progress) 366 if (!in_progress)
370 return len; 367 return len;
@@ -376,17 +373,15 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
376 373
377 /* figure out the finished and calculate the residue */ 374 /* figure out the finished and calculate the residue */
378 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { 375 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
379 size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes)) 376 size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
380 * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter)); 377 * le16_to_cpu(edesc->tcd[i].vtcd->biter);
381 if (dir == DMA_MEM_TO_DEV) 378 if (dir == DMA_MEM_TO_DEV)
382 dma_addr = edma_readl(fsl_chan->edma, 379 dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
383 &(edesc->tcd[i].vtcd->saddr));
384 else 380 else
385 dma_addr = edma_readl(fsl_chan->edma, 381 dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
386 &(edesc->tcd[i].vtcd->daddr));
387 382
388 len -= size; 383 len -= size;
389 if (cur_addr > dma_addr && cur_addr < dma_addr + size) { 384 if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
390 len += dma_addr + size - cur_addr; 385 len += dma_addr + size - cur_addr;
391 break; 386 break;
392 } 387 }
@@ -424,55 +419,67 @@ static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
424 return fsl_chan->status; 419 return fsl_chan->status;
425} 420}
426 421
427static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan, 422static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
428 u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes, 423 struct fsl_edma_hw_tcd *tcd)
429 u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
430 u16 csr)
431{ 424{
425 struct fsl_edma_engine *edma = fsl_chan->edma;
432 void __iomem *addr = fsl_chan->edma->membase; 426 void __iomem *addr = fsl_chan->edma->membase;
433 u32 ch = fsl_chan->vchan.chan.chan_id; 427 u32 ch = fsl_chan->vchan.chan.chan_id;
434 428
435 /* 429 /*
436 * TCD parameters have been swapped in fill_tcd_params(), 430 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
437 * so just write them to registers in the cpu endian here 431 * endian format. However, we need to load the TCD registers in
432 * big- or little-endian obeying the eDMA engine model endian.
438 */ 433 */
439 writew(0, addr + EDMA_TCD_CSR(ch)); 434 edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
440 writel(src, addr + EDMA_TCD_SADDR(ch)); 435 edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
441 writel(dst, addr + EDMA_TCD_DADDR(ch)); 436 edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
442 writew(attr, addr + EDMA_TCD_ATTR(ch)); 437
443 writew(soff, addr + EDMA_TCD_SOFF(ch)); 438 edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
444 writel(nbytes, addr + EDMA_TCD_NBYTES(ch)); 439 edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
445 writel(slast, addr + EDMA_TCD_SLAST(ch)); 440
446 writew(citer, addr + EDMA_TCD_CITER(ch)); 441 edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
447 writew(biter, addr + EDMA_TCD_BITER(ch)); 442 edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
448 writew(doff, addr + EDMA_TCD_DOFF(ch)); 443
449 writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch)); 444 edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
450 writew(csr, addr + EDMA_TCD_CSR(ch)); 445 edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
451} 446 edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
452 447
453static void fill_tcd_params(struct fsl_edma_engine *edma, 448 edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
454 struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst, 449
455 u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer, 450 edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
456 u16 biter, u16 doff, u32 dlast_sga, bool major_int, 451}
457 bool disable_req, bool enable_sg) 452
453static inline
454void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
455 u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
456 u16 biter, u16 doff, u32 dlast_sga, bool major_int,
457 bool disable_req, bool enable_sg)
458{ 458{
459 u16 csr = 0; 459 u16 csr = 0;
460 460
461 /* 461 /*
462 * eDMA hardware SGs require the TCD parameters stored in memory 462 * eDMA hardware SGs require the TCDs to be stored in little
463 * the same endian as the eDMA module so that they can be loaded 463 * endian format irrespective of the register endian model.
464 * automatically by the engine 464 * So we put the value in little endian in memory, waiting
465 * for fsl_edma_set_tcd_regs doing the swap.
465 */ 466 */
466 edma_writel(edma, src, &(tcd->saddr)); 467 tcd->saddr = cpu_to_le32(src);
467 edma_writel(edma, dst, &(tcd->daddr)); 468 tcd->daddr = cpu_to_le32(dst);
468 edma_writew(edma, attr, &(tcd->attr)); 469
469 edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff)); 470 tcd->attr = cpu_to_le16(attr);
470 edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes)); 471
471 edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast)); 472 tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
472 edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer)); 473
473 edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff)); 474 tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
474 edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga)); 475 tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
475 edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter)); 476
477 tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
478 tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
479
480 tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
481
482 tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
476 if (major_int) 483 if (major_int)
477 csr |= EDMA_TCD_CSR_INT_MAJOR; 484 csr |= EDMA_TCD_CSR_INT_MAJOR;
478 485
@@ -482,7 +489,7 @@ static void fill_tcd_params(struct fsl_edma_engine *edma,
482 if (enable_sg) 489 if (enable_sg)
483 csr |= EDMA_TCD_CSR_E_SG; 490 csr |= EDMA_TCD_CSR_E_SG;
484 491
485 edma_writew(edma, csr, &(tcd->csr)); 492 tcd->csr = cpu_to_le16(csr);
486} 493}
487 494
488static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, 495static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -558,9 +565,9 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
558 doff = fsl_chan->fsc.addr_width; 565 doff = fsl_chan->fsc.addr_width;
559 } 566 }
560 567
561 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr, 568 fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
562 dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0, 569 fsl_chan->fsc.attr, soff, nbytes, 0, iter,
563 iter, iter, doff, last_sg, true, false, true); 570 iter, doff, last_sg, true, false, true);
564 dma_buf_next += period_len; 571 dma_buf_next += period_len;
565 } 572 }
566 573
@@ -607,16 +614,16 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
607 iter = sg_dma_len(sg) / nbytes; 614 iter = sg_dma_len(sg) / nbytes;
608 if (i < sg_len - 1) { 615 if (i < sg_len - 1) {
609 last_sg = fsl_desc->tcd[(i + 1)].ptcd; 616 last_sg = fsl_desc->tcd[(i + 1)].ptcd;
610 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, 617 fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
611 src_addr, dst_addr, fsl_chan->fsc.attr, 618 dst_addr, fsl_chan->fsc.attr, soff,
612 soff, nbytes, 0, iter, iter, doff, last_sg, 619 nbytes, 0, iter, iter, doff, last_sg,
613 false, false, true); 620 false, false, true);
614 } else { 621 } else {
615 last_sg = 0; 622 last_sg = 0;
616 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, 623 fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
617 src_addr, dst_addr, fsl_chan->fsc.attr, 624 dst_addr, fsl_chan->fsc.attr, soff,
618 soff, nbytes, 0, iter, iter, doff, last_sg, 625 nbytes, 0, iter, iter, doff, last_sg,
619 true, true, false); 626 true, true, false);
620 } 627 }
621 } 628 }
622 629
@@ -625,17 +632,13 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
625 632
626static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) 633static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
627{ 634{
628 struct fsl_edma_hw_tcd *tcd;
629 struct virt_dma_desc *vdesc; 635 struct virt_dma_desc *vdesc;
630 636
631 vdesc = vchan_next_desc(&fsl_chan->vchan); 637 vdesc = vchan_next_desc(&fsl_chan->vchan);
632 if (!vdesc) 638 if (!vdesc)
633 return; 639 return;
634 fsl_chan->edesc = to_fsl_edma_desc(vdesc); 640 fsl_chan->edesc = to_fsl_edma_desc(vdesc);
635 tcd = fsl_chan->edesc->tcd[0].vtcd; 641 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
636 fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
637 tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
638 tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
639 fsl_edma_enable_request(fsl_chan); 642 fsl_edma_enable_request(fsl_chan);
640 fsl_chan->status = DMA_IN_PROGRESS; 643 fsl_chan->status = DMA_IN_PROGRESS;
641} 644}
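
[Editor's note] The fsl-edma rework separates two endianness rules: TCDs kept in memory for hardware scatter/gather stay little-endian regardless of configuration, while programmed register accesses follow the eDMA block's endian, with 8- and 16-bit register offsets byte-swapped (addr ^ 0x3 and addr ^ 0x2) when the block is big-endian. The sketch below illustrates only the offset-swap part against an in-memory buffer; demo_writeb/demo_writew are made-up helpers, not kernel accessors.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Pretend 8-byte register window; real code would use MMIO accessors. */
	static uint8_t regs[8];

	/*
	 * On a big-endian 32-bit register bus the byte lanes are mirrored, so
	 * sub-word accesses flip the low offset bits: bytes use offset ^ 0x3,
	 * halfwords use offset ^ 0x2, the same trick the fsl-edma hunk applies.
	 */
	static void demo_writeb(int big_endian, unsigned int off, uint8_t val)
	{
		regs[big_endian ? (off ^ 0x3) : off] = val;
	}

	static void demo_writew(int big_endian, unsigned int off, uint16_t val)
	{
		unsigned int o = big_endian ? (off ^ 0x2) : off;

		/* The value's own byte order is not modelled here, only the offset. */
		memcpy(&regs[o], &val, sizeof(val));
	}

	int main(void)
	{
		demo_writeb(1, 4, 0xAA);	/* byte at offset 4 lands in lane 7 */
		demo_writew(1, 0, 0x1234);	/* halfword at offset 0 lands in lanes 2..3 */
		for (unsigned int i = 0; i < 8; i++)
			printf("reg[%u] = 0x%02x\n", i, regs[i]);
		return 0;
	}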
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 994bcb2c6b92..3d8feb5e4c2f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1337,7 +1337,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1337 1337
1338 /* Add the channel to DMA device channel list */ 1338 /* Add the channel to DMA device channel list */
1339 list_add_tail(&chan->common.device_node, &fdev->common.channels); 1339 list_add_tail(&chan->common.device_node, &fdev->common.channels);
1340 fdev->common.chancnt++;
1341 1340
1342 dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, 1341 dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
1343 chan->irq != NO_IRQ ? chan->irq : fdev->irq); 1342 chan->irq != NO_IRQ ? chan->irq : fdev->irq);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 88afc48c2ca7..d0df198f62e9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -729,6 +729,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
729 case IMX_DMATYPE_CSPI: 729 case IMX_DMATYPE_CSPI:
730 case IMX_DMATYPE_EXT: 730 case IMX_DMATYPE_EXT:
731 case IMX_DMATYPE_SSI: 731 case IMX_DMATYPE_SSI:
732 case IMX_DMATYPE_SAI:
732 per_2_emi = sdma->script_addrs->app_2_mcu_addr; 733 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
733 emi_2_per = sdma->script_addrs->mcu_2_app_addr; 734 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
734 break; 735 break;
@@ -1287,7 +1288,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
1287 unsigned short *ram_code; 1288 unsigned short *ram_code;
1288 1289
1289 if (!fw) { 1290 if (!fw) {
1290 dev_err(sdma->dev, "firmware not found\n"); 1291 dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1292 /* In this case we just use the ROM firmware. */
1291 return; 1293 return;
1292 } 1294 }
1293 1295
@@ -1346,7 +1348,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
1346 return ret; 1348 return ret;
1347} 1349}
1348 1350
1349static int __init sdma_init(struct sdma_engine *sdma) 1351static int sdma_init(struct sdma_engine *sdma)
1350{ 1352{
1351 int i, ret; 1353 int i, ret;
1352 dma_addr_t ccb_phys; 1354 dma_addr_t ccb_phys;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 895f869d6c2c..32eae38291e5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1265 op = IOAT_OP_XOR; 1265 op = IOAT_OP_XOR;
1266 1266
1267 dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); 1267 dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1268 if (dma_mapping_error(dev, dest_dma))
1269 goto dma_unmap;
1270
1268 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1271 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1272 dma_srcs[i] = DMA_ERROR_CODE;
1273 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
1269 dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, 1274 dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
1270 DMA_TO_DEVICE); 1275 DMA_TO_DEVICE);
1276 if (dma_mapping_error(dev, dma_srcs[i]))
1277 goto dma_unmap;
1278 }
1271 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 1279 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1272 IOAT_NUM_SRC_TEST, PAGE_SIZE, 1280 IOAT_NUM_SRC_TEST, PAGE_SIZE,
1273 DMA_PREP_INTERRUPT); 1281 DMA_PREP_INTERRUPT);
@@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1298 goto dma_unmap; 1306 goto dma_unmap;
1299 } 1307 }
1300 1308
1301 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1302 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1309 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1303 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 1310 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1304 1311
@@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1313 } 1320 }
1314 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 1321 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1315 1322
1323 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1324
1316 /* skip validate if the capability is not present */ 1325 /* skip validate if the capability is not present */
1317 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) 1326 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1318 goto free_resources; 1327 goto free_resources;
@@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1327 xor_val_result = 1; 1336 xor_val_result = 1;
1328 1337
1329 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1338 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1339 dma_srcs[i] = DMA_ERROR_CODE;
1340 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
1330 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, 1341 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1331 DMA_TO_DEVICE); 1342 DMA_TO_DEVICE);
1343 if (dma_mapping_error(dev, dma_srcs[i]))
1344 goto dma_unmap;
1345 }
1332 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1346 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1333 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1347 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1334 &xor_val_result, DMA_PREP_INTERRUPT); 1348 &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1374 1388
1375 xor_val_result = 0; 1389 xor_val_result = 0;
1376 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1390 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1391 dma_srcs[i] = DMA_ERROR_CODE;
1392 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
1377 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, 1393 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1378 DMA_TO_DEVICE); 1394 DMA_TO_DEVICE);
1395 if (dma_mapping_error(dev, dma_srcs[i]))
1396 goto dma_unmap;
1397 }
1379 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1398 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1380 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1399 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1381 &xor_val_result, DMA_PREP_INTERRUPT); 1400 &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1417 goto free_resources; 1436 goto free_resources;
1418dma_unmap: 1437dma_unmap:
1419 if (op == IOAT_OP_XOR) { 1438 if (op == IOAT_OP_XOR) {
1420 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 1439 if (dest_dma != DMA_ERROR_CODE)
1440 dma_unmap_page(dev, dest_dma, PAGE_SIZE,
1441 DMA_FROM_DEVICE);
1421 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1442 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1422 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, 1443 if (dma_srcs[i] != DMA_ERROR_CODE)
1423 DMA_TO_DEVICE); 1444 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1445 DMA_TO_DEVICE);
1424 } else if (op == IOAT_OP_XOR_VAL) { 1446 } else if (op == IOAT_OP_XOR_VAL) {
1425 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1447 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1426 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, 1448 if (dma_srcs[i] != DMA_ERROR_CODE)
1427 DMA_TO_DEVICE); 1449 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1450 DMA_TO_DEVICE);
1428 } 1451 }
1429free_resources: 1452free_resources:
1430 dma->device_free_chan_resources(dma_chan); 1453 dma->device_free_chan_resources(dma_chan);
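
[Editor's note] The ioatdma self-test fix works by pre-filling every DMA handle with DMA_ERROR_CODE, checking each dma_map_page() result with dma_mapping_error(), and having the unwind path unmap only slots that actually hold a good mapping. A compact user-space sketch of that map-check-unwind shape follows; demo_map(), demo_unmap() and MAP_ERROR are invented stand-ins, not the DMA API.

	#include <stdio.h>

	#define NSRC		4
	#define MAP_ERROR	(-1)	/* plays the role of DMA_ERROR_CODE */

	/* Fake "mapping" that fails for one slot to exercise the unwind path. */
	static int demo_map(int i)	{ return i == 2 ? MAP_ERROR : 100 + i; }
	static void demo_unmap(int h)	{ printf("unmap handle %d\n", h); }

	static int map_all(int handles[NSRC])
	{
		int i;

		/* Pre-fill with the error sentinel so partial failures are visible. */
		for (i = 0; i < NSRC; i++)
			handles[i] = MAP_ERROR;

		for (i = 0; i < NSRC; i++) {
			handles[i] = demo_map(i);
			if (handles[i] == MAP_ERROR)
				goto unwind;
		}
		return 0;

	unwind:
		/* Only undo the slots that really got a mapping. */
		for (i = 0; i < NSRC; i++)
			if (handles[i] != MAP_ERROR)
				demo_unmap(handles[i]);
		return -1;
	}

	int main(void)
	{
		int handles[NSRC];

		printf("map_all: %d\n", map_all(handles));
		return 0;
	}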
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c56137bc3868..263d9f6a207e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1557,7 +1557,6 @@ static struct platform_driver iop_adma_driver = {
1557 .probe = iop_adma_probe, 1557 .probe = iop_adma_probe,
1558 .remove = iop_adma_remove, 1558 .remove = iop_adma_remove,
1559 .driver = { 1559 .driver = {
1560 .owner = THIS_MODULE,
1561 .name = "iop-adma", 1560 .name = "iop-adma",
1562 }, 1561 },
1563}; 1562};
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1f911aaf220..a1de14ab2c51 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -722,7 +722,6 @@ static int k3_dma_probe(struct platform_device *op)
722 d->slave.device_issue_pending = k3_dma_issue_pending; 722 d->slave.device_issue_pending = k3_dma_issue_pending;
723 d->slave.device_control = k3_dma_control; 723 d->slave.device_control = k3_dma_control;
724 d->slave.copy_align = DMA_ALIGN; 724 d->slave.copy_align = DMA_ALIGN;
725 d->slave.chancnt = d->dma_requests;
726 725
727 /* init virtual channel */ 726 /* init virtual channel */
728 d->chans = devm_kzalloc(&op->dev, 727 d->chans = devm_kzalloc(&op->dev,
@@ -787,6 +786,7 @@ static int k3_dma_remove(struct platform_device *op)
787 return 0; 786 return 0;
788} 787}
789 788
789#ifdef CONFIG_PM_SLEEP
790static int k3_dma_suspend(struct device *dev) 790static int k3_dma_suspend(struct device *dev)
791{ 791{
792 struct k3_dma_dev *d = dev_get_drvdata(dev); 792 struct k3_dma_dev *d = dev_get_drvdata(dev);
@@ -816,13 +816,13 @@ static int k3_dma_resume(struct device *dev)
816 k3_dma_enable_dma(d, true); 816 k3_dma_enable_dma(d, true);
817 return 0; 817 return 0;
818} 818}
819#endif
819 820
820static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); 821static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
821 822
822static struct platform_driver k3_pdma_driver = { 823static struct platform_driver k3_pdma_driver = {
823 .driver = { 824 .driver = {
824 .name = DRIVER_NAME, 825 .name = DRIVER_NAME,
825 .owner = THIS_MODULE,
826 .pm = &k3_dma_pmops, 826 .pm = &k3_dma_pmops,
827 .of_match_table = k3_pdma_dt_ids, 827 .of_match_table = k3_pdma_dt_ids,
828 }, 828 },
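
[Editor's note] The k3dma hunk wraps its suspend/resume callbacks in #ifdef CONFIG_PM_SLEEP; SIMPLE_DEV_PM_OPS only references those functions when that option is enabled, so unguarded definitions would otherwise trigger "defined but not used" warnings in !CONFIG_PM_SLEEP builds. A stripped-down sketch of the same guard pattern, with placeholder names rather than the kernel macros:

	#include <stdio.h>

	/* Toggle to mimic a kernel config option such as CONFIG_PM_SLEEP. */
	#define DEMO_PM_SLEEP 1

	struct demo_pm_ops { int (*suspend)(void); int (*resume)(void); };

	#if DEMO_PM_SLEEP
	/* Only compiled in when the feature exists, so no unused-function warning. */
	static int demo_suspend(void) { puts("suspend"); return 0; }
	static int demo_resume(void)  { puts("resume");  return 0; }
	#define DEMO_PM_OPS { .suspend = demo_suspend, .resume = demo_resume }
	#else
	#define DEMO_PM_OPS { 0 }
	#endif

	static const struct demo_pm_ops demo_ops = DEMO_PM_OPS;

	int main(void)
	{
		if (demo_ops.suspend)
			demo_ops.suspend();
		if (demo_ops.resume)
			demo_ops.resume();
		return 0;
	}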
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a1a4db5721b8..8b8952f35e6c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1098,7 +1098,6 @@ static const struct platform_device_id mmp_pdma_id_table[] = {
1098static struct platform_driver mmp_pdma_driver = { 1098static struct platform_driver mmp_pdma_driver = {
1099 .driver = { 1099 .driver = {
1100 .name = "mmp-pdma", 1100 .name = "mmp-pdma",
1101 .owner = THIS_MODULE,
1102 .of_match_table = mmp_pdma_dt_ids, 1101 .of_match_table = mmp_pdma_dt_ids,
1103 }, 1102 },
1104 .id_table = mmp_pdma_id_table, 1103 .id_table = mmp_pdma_id_table,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c6bd015b7165..bfb46957c3dc 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -703,7 +703,6 @@ static const struct platform_device_id mmp_tdma_id_table[] = {
703static struct platform_driver mmp_tdma_driver = { 703static struct platform_driver mmp_tdma_driver = {
704 .driver = { 704 .driver = {
705 .name = "mmp-tdma", 705 .name = "mmp-tdma",
706 .owner = THIS_MODULE,
707 .of_match_table = mmp_tdma_dt_ids, 706 .of_match_table = mmp_tdma_dt_ids,
708 }, 707 },
709 .id_table = mmp_tdma_id_table, 708 .id_table = mmp_tdma_id_table,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 881db2bcb48b..01bec4023de2 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -885,6 +885,7 @@ static int mpc_dma_probe(struct platform_device *op)
885 struct resource res; 885 struct resource res;
886 ulong regs_start, regs_size; 886 ulong regs_start, regs_size;
887 int retval, i; 887 int retval, i;
888 u8 chancnt;
888 889
889 mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); 890 mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
890 if (!mdma) { 891 if (!mdma) {
@@ -956,10 +957,6 @@ static int mpc_dma_probe(struct platform_device *op)
956 957
957 dma = &mdma->dma; 958 dma = &mdma->dma;
958 dma->dev = dev; 959 dma->dev = dev;
959 if (mdma->is_mpc8308)
960 dma->chancnt = MPC8308_DMACHAN_MAX;
961 else
962 dma->chancnt = MPC512x_DMACHAN_MAX;
963 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 960 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
964 dma->device_free_chan_resources = mpc_dma_free_chan_resources; 961 dma->device_free_chan_resources = mpc_dma_free_chan_resources;
965 dma->device_issue_pending = mpc_dma_issue_pending; 962 dma->device_issue_pending = mpc_dma_issue_pending;
@@ -972,7 +969,12 @@ static int mpc_dma_probe(struct platform_device *op)
972 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 969 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
973 dma_cap_set(DMA_SLAVE, dma->cap_mask); 970 dma_cap_set(DMA_SLAVE, dma->cap_mask);
974 971
975 for (i = 0; i < dma->chancnt; i++) { 972 if (mdma->is_mpc8308)
973 chancnt = MPC8308_DMACHAN_MAX;
974 else
975 chancnt = MPC512x_DMACHAN_MAX;
976
977 for (i = 0; i < chancnt; i++) {
976 mchan = &mdma->channels[i]; 978 mchan = &mdma->channels[i];
977 979
978 mchan->chan.device = dma; 980 mchan->chan.device = dma;
@@ -1090,7 +1092,6 @@ static struct platform_driver mpc_dma_driver = {
1090 .remove = mpc_dma_remove, 1092 .remove = mpc_dma_remove,
1091 .driver = { 1093 .driver = {
1092 .name = DRV_NAME, 1094 .name = DRV_NAME,
1093 .owner = THIS_MODULE,
1094 .of_match_table = mpc_dma_match, 1095 .of_match_table = mpc_dma_match,
1095 }, 1096 },
1096}; 1097};
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index bda20e6e1007..d7d61e1a01c3 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1500,7 +1500,6 @@ static const struct dev_pm_ops nbpf_pm_ops = {
1500 1500
1501static struct platform_driver nbpf_driver = { 1501static struct platform_driver nbpf_driver = {
1502 .driver = { 1502 .driver = {
1503 .owner = THIS_MODULE,
1504 .name = "dma-nbpf", 1503 .name = "dma-nbpf",
1505 .of_match_table = nbpf_match, 1504 .of_match_table = nbpf_match,
1506 .pm = &nbpf_pm_ops, 1505 .pm = &nbpf_pm_ops,
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index bbea8243f9e8..6ea1aded7e74 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1074,8 +1074,6 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
1074 vchan_init(&c->vc, &od->ddev); 1074 vchan_init(&c->vc, &od->ddev);
1075 INIT_LIST_HEAD(&c->node); 1075 INIT_LIST_HEAD(&c->node);
1076 1076
1077 od->ddev.chancnt++;
1078
1079 return 0; 1077 return 0;
1080} 1078}
1081 1079
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 9f9ca9fe5ce6..6e0e47d76b23 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -997,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
997#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 997#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
998#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 998#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
999 999
1000const struct pci_device_id pch_dma_id_table[] = { 1000static const struct pci_device_id pch_dma_id_table[] = {
1001 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, 1001 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
1002 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, 1002 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
1003 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ 1003 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 19a99743cf52..025b905f6db2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2619,6 +2619,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2619 return -ENOMEM; 2619 return -ENOMEM;
2620 } 2620 }
2621 2621
2622 pd = &pl330->ddma;
2623 pd->dev = &adev->dev;
2624
2622 pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; 2625 pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
2623 2626
2624 res = &adev->res; 2627 res = &adev->res;
@@ -2655,7 +2658,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2655 if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) 2658 if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
2656 dev_warn(&adev->dev, "unable to allocate desc\n"); 2659 dev_warn(&adev->dev, "unable to allocate desc\n");
2657 2660
2658 pd = &pl330->ddma;
2659 INIT_LIST_HEAD(&pd->channels); 2661 INIT_LIST_HEAD(&pd->channels);
2660 2662
2661 /* Initialize channel parameters */ 2663 /* Initialize channel parameters */
@@ -2692,7 +2694,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2692 list_add_tail(&pch->chan.device_node, &pd->channels); 2694 list_add_tail(&pch->chan.device_node, &pd->channels);
2693 } 2695 }
2694 2696
2695 pd->dev = &adev->dev;
2696 if (pdat) { 2697 if (pdat) {
2697 pd->cap_mask = pdat->cap_mask; 2698 pd->cap_mask = pdat->cap_mask;
2698 } else { 2699 } else {
@@ -2819,6 +2820,6 @@ static struct amba_driver pl330_driver = {
2819 2820
2820module_amba_driver(pl330_driver); 2821module_amba_driver(pl330_driver);
2821 2822
2822MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); 2823MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
2823MODULE_DESCRIPTION("API Driver for PL330 DMAC"); 2824MODULE_DESCRIPTION("API Driver for PL330 DMAC");
2824MODULE_LICENSE("GPL"); 2825MODULE_LICENSE("GPL");
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 7a4bbb0f80a5..3122a99ec06b 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -79,35 +79,97 @@ struct bam_async_desc {
79 struct bam_desc_hw desc[0]; 79 struct bam_desc_hw desc[0];
80}; 80};
81 81
82#define BAM_CTRL 0x0000 82enum bam_reg {
83#define BAM_REVISION 0x0004 83 BAM_CTRL,
84#define BAM_SW_REVISION 0x0080 84 BAM_REVISION,
85#define BAM_NUM_PIPES 0x003C 85 BAM_NUM_PIPES,
86#define BAM_TIMER 0x0040 86 BAM_DESC_CNT_TRSHLD,
87#define BAM_TIMER_CTRL 0x0044 87 BAM_IRQ_SRCS,
88#define BAM_DESC_CNT_TRSHLD 0x0008 88 BAM_IRQ_SRCS_MSK,
89#define BAM_IRQ_SRCS 0x000C 89 BAM_IRQ_SRCS_UNMASKED,
90#define BAM_IRQ_SRCS_MSK 0x0010 90 BAM_IRQ_STTS,
91#define BAM_IRQ_SRCS_UNMASKED 0x0030 91 BAM_IRQ_CLR,
92#define BAM_IRQ_STTS 0x0014 92 BAM_IRQ_EN,
93#define BAM_IRQ_CLR 0x0018 93 BAM_CNFG_BITS,
94#define BAM_IRQ_EN 0x001C 94 BAM_IRQ_SRCS_EE,
95#define BAM_CNFG_BITS 0x007C 95 BAM_IRQ_SRCS_MSK_EE,
96#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80)) 96 BAM_P_CTRL,
97#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80)) 97 BAM_P_RST,
98#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000)) 98 BAM_P_HALT,
99#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000)) 99 BAM_P_IRQ_STTS,
100#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000)) 100 BAM_P_IRQ_CLR,
101#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000)) 101 BAM_P_IRQ_EN,
102#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000)) 102 BAM_P_EVNT_DEST_ADDR,
103#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000)) 103 BAM_P_EVNT_REG,
104#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000)) 104 BAM_P_SW_OFSTS,
105#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000)) 105 BAM_P_DATA_FIFO_ADDR,
106#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000)) 106 BAM_P_DESC_FIFO_ADDR,
107#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000)) 107 BAM_P_EVNT_GEN_TRSHLD,
108#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000)) 108 BAM_P_FIFO_SIZES,
109#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000)) 109};
110#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000)) 110
111struct reg_offset_data {
112 u32 base_offset;
113 unsigned int pipe_mult, evnt_mult, ee_mult;
114};
115
116static const struct reg_offset_data bam_v1_3_reg_info[] = {
117 [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 },
118 [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 },
119 [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 },
120 [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 },
121 [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 },
122 [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 },
123 [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
124 [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 },
125 [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 },
126 [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 },
127 [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 },
128 [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 },
129 [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 },
130 [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 },
131 [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 },
132 [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 },
133 [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 },
134 [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 },
135 [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 },
136 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 },
137 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 },
138 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 },
139 [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 },
140 [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 },
141 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
142 [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 },
143};
144
145static const struct reg_offset_data bam_v1_4_reg_info[] = {
146 [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 },
147 [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 },
148 [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 },
149 [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 },
150 [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 },
151 [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 },
152 [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
153 [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 },
154 [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 },
155 [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 },
156 [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 },
157 [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 },
158 [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 },
159 [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 },
160 [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 },
161 [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 },
162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
165 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 },
166 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 },
167 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 },
168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
171 [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
172};
111 173
112/* BAM CTRL */ 174/* BAM CTRL */
113#define BAM_SW_RST BIT(0) 175#define BAM_SW_RST BIT(0)
@@ -297,6 +359,8 @@ struct bam_device {
297 /* execution environment ID, from DT */ 359 /* execution environment ID, from DT */
298 u32 ee; 360 u32 ee;
299 361
362 const struct reg_offset_data *layout;
363
300 struct clk *bamclk; 364 struct clk *bamclk;
301 int irq; 365 int irq;
302 366
@@ -305,6 +369,23 @@ struct bam_device {
305}; 369};
306 370
307/** 371/**
372 * bam_addr - returns BAM register address
373 * @bdev: bam device
374 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
375 * @reg: register enum
376 */
377static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
378 enum bam_reg reg)
379{
380 const struct reg_offset_data r = bdev->layout[reg];
381
382 return bdev->regs + r.base_offset +
383 r.pipe_mult * pipe +
384 r.evnt_mult * pipe +
385 r.ee_mult * bdev->ee;
386}
387
388/**
308 * bam_reset_channel - Reset individual BAM DMA channel 389 * bam_reset_channel - Reset individual BAM DMA channel
309 * @bchan: bam channel 390 * @bchan: bam channel
310 * 391 *
@@ -317,8 +398,8 @@ static void bam_reset_channel(struct bam_chan *bchan)
317 lockdep_assert_held(&bchan->vc.lock); 398 lockdep_assert_held(&bchan->vc.lock);
318 399
319 /* reset channel */ 400 /* reset channel */
320 writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id)); 401 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
321 writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id)); 402 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
322 403
323 /* don't allow cpu to reorder BAM register accesses done after this */ 404 /* don't allow cpu to reorder BAM register accesses done after this */
324 wmb(); 405 wmb();
@@ -347,17 +428,18 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
347 * because we allocated 1 more descriptor (8 bytes) than we can use 428 * because we allocated 1 more descriptor (8 bytes) than we can use
348 */ 429 */
349 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), 430 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
350 bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id)); 431 bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
351 writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs + 432 writel_relaxed(BAM_DESC_FIFO_SIZE,
352 BAM_P_FIFO_SIZES(bchan->id)); 433 bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
353 434
354 /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ 435 /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
355 writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id)); 436 writel_relaxed(P_DEFAULT_IRQS_EN,
437 bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
356 438
357 /* unmask the specific pipe and EE combo */ 439 /* unmask the specific pipe and EE combo */
358 val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 440 val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
359 val |= BIT(bchan->id); 441 val |= BIT(bchan->id);
360 writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 442 writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
361 443
362 /* don't allow cpu to reorder the channel enable done below */ 444 /* don't allow cpu to reorder the channel enable done below */
363 wmb(); 445 wmb();
@@ -367,7 +449,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
367 if (dir == DMA_DEV_TO_MEM) 449 if (dir == DMA_DEV_TO_MEM)
368 val |= P_DIRECTION; 450 val |= P_DIRECTION;
369 451
370 writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id)); 452 writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
371 453
372 bchan->initialized = 1; 454 bchan->initialized = 1;
373 455
@@ -432,12 +514,12 @@ static void bam_free_chan(struct dma_chan *chan)
432 bchan->fifo_virt = NULL; 514 bchan->fifo_virt = NULL;
433 515
434 /* mask irq for pipe/channel */ 516 /* mask irq for pipe/channel */
435 val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 517 val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
436 val &= ~BIT(bchan->id); 518 val &= ~BIT(bchan->id);
437 writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 519 writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
438 520
439 /* disable irq */ 521 /* disable irq */
440 writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id)); 522 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
441} 523}
442 524
443/** 525/**
@@ -583,14 +665,14 @@ static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
583 switch (cmd) { 665 switch (cmd) {
584 case DMA_PAUSE: 666 case DMA_PAUSE:
585 spin_lock_irqsave(&bchan->vc.lock, flag); 667 spin_lock_irqsave(&bchan->vc.lock, flag);
586 writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id)); 668 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
587 bchan->paused = 1; 669 bchan->paused = 1;
588 spin_unlock_irqrestore(&bchan->vc.lock, flag); 670 spin_unlock_irqrestore(&bchan->vc.lock, flag);
589 break; 671 break;
590 672
591 case DMA_RESUME: 673 case DMA_RESUME:
592 spin_lock_irqsave(&bchan->vc.lock, flag); 674 spin_lock_irqsave(&bchan->vc.lock, flag);
593 writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id)); 675 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
594 bchan->paused = 0; 676 bchan->paused = 0;
595 spin_unlock_irqrestore(&bchan->vc.lock, flag); 677 spin_unlock_irqrestore(&bchan->vc.lock, flag);
596 break; 678 break;
@@ -626,7 +708,7 @@ static u32 process_channel_irqs(struct bam_device *bdev)
626 unsigned long flags; 708 unsigned long flags;
627 struct bam_async_desc *async_desc; 709 struct bam_async_desc *async_desc;
628 710
629 srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee)); 711 srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
630 712
631 /* return early if no pipe/channel interrupts are present */ 713 /* return early if no pipe/channel interrupts are present */
632 if (!(srcs & P_IRQ)) 714 if (!(srcs & P_IRQ))
@@ -639,11 +721,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
639 continue; 721 continue;
640 722
641 /* clear pipe irq */ 723 /* clear pipe irq */
642 pipe_stts = readl_relaxed(bdev->regs + 724 pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
643 BAM_P_IRQ_STTS(i));
644 725
645 writel_relaxed(pipe_stts, bdev->regs + 726 writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
646 BAM_P_IRQ_CLR(i));
647 727
648 spin_lock_irqsave(&bchan->vc.lock, flags); 728 spin_lock_irqsave(&bchan->vc.lock, flags);
649 async_desc = bchan->curr_txd; 729 async_desc = bchan->curr_txd;
@@ -694,12 +774,12 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
694 tasklet_schedule(&bdev->task); 774 tasklet_schedule(&bdev->task);
695 775
696 if (srcs & BAM_IRQ) 776 if (srcs & BAM_IRQ)
697 clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS); 777 clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
698 778
699 /* don't allow reorder of the various accesses to the BAM registers */ 779 /* don't allow reorder of the various accesses to the BAM registers */
700 mb(); 780 mb();
701 781
702 writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR); 782 writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
703 783
704 return IRQ_HANDLED; 784 return IRQ_HANDLED;
705} 785}
@@ -763,7 +843,7 @@ static void bam_apply_new_config(struct bam_chan *bchan,
763 else 843 else
764 maxburst = bchan->slave.dst_maxburst; 844 maxburst = bchan->slave.dst_maxburst;
765 845
766 writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD); 846 writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
767 847
768 bchan->reconfigure = 0; 848 bchan->reconfigure = 0;
769} 849}
@@ -830,7 +910,7 @@ static void bam_start_dma(struct bam_chan *bchan)
830 /* ensure descriptor writes and dma start not reordered */ 910 /* ensure descriptor writes and dma start not reordered */
831 wmb(); 911 wmb();
832 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), 912 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
833 bdev->regs + BAM_P_EVNT_REG(bchan->id)); 913 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
834} 914}
835 915
836/** 916/**
@@ -918,43 +998,44 @@ static int bam_init(struct bam_device *bdev)
918 u32 val; 998 u32 val;
919 999
920 /* read revision and configuration information */ 1000 /* read revision and configuration information */
921 val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT; 1001 val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
922 val &= NUM_EES_MASK; 1002 val &= NUM_EES_MASK;
923 1003
924 /* check that configured EE is within range */ 1004 /* check that configured EE is within range */
925 if (bdev->ee >= val) 1005 if (bdev->ee >= val)
926 return -EINVAL; 1006 return -EINVAL;
927 1007
928 val = readl_relaxed(bdev->regs + BAM_NUM_PIPES); 1008 val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
929 bdev->num_channels = val & BAM_NUM_PIPES_MASK; 1009 bdev->num_channels = val & BAM_NUM_PIPES_MASK;
930 1010
931 /* s/w reset bam */ 1011 /* s/w reset bam */
932 /* after reset all pipes are disabled and idle */ 1012 /* after reset all pipes are disabled and idle */
933 val = readl_relaxed(bdev->regs + BAM_CTRL); 1013 val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
934 val |= BAM_SW_RST; 1014 val |= BAM_SW_RST;
935 writel_relaxed(val, bdev->regs + BAM_CTRL); 1015 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
936 val &= ~BAM_SW_RST; 1016 val &= ~BAM_SW_RST;
937 writel_relaxed(val, bdev->regs + BAM_CTRL); 1017 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
938 1018
939 /* make sure previous stores are visible before enabling BAM */ 1019 /* make sure previous stores are visible before enabling BAM */
940 wmb(); 1020 wmb();
941 1021
942 /* enable bam */ 1022 /* enable bam */
943 val |= BAM_EN; 1023 val |= BAM_EN;
944 writel_relaxed(val, bdev->regs + BAM_CTRL); 1024 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
945 1025
946 /* set descriptor threshhold, start with 4 bytes */ 1026 /* set descriptor threshhold, start with 4 bytes */
947 writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD); 1027 writel_relaxed(DEFAULT_CNT_THRSHLD,
1028 bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
948 1029
949 /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ 1030 /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
950 writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS); 1031 writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
951 1032
952 /* enable irqs for errors */ 1033 /* enable irqs for errors */
953 writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, 1034 writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
954 bdev->regs + BAM_IRQ_EN); 1035 bam_addr(bdev, 0, BAM_IRQ_EN));
955 1036
956 /* unmask global bam interrupt */ 1037 /* unmask global bam interrupt */
957 writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 1038 writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
958 1039
959 return 0; 1040 return 0;
960} 1041}
@@ -969,9 +1050,18 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
969 bchan->vc.desc_free = bam_dma_free_desc; 1050 bchan->vc.desc_free = bam_dma_free_desc;
970} 1051}
971 1052
1053static const struct of_device_id bam_of_match[] = {
1054 { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
1055 { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
1056 {}
1057};
1058
1059MODULE_DEVICE_TABLE(of, bam_of_match);
1060
972static int bam_dma_probe(struct platform_device *pdev) 1061static int bam_dma_probe(struct platform_device *pdev)
973{ 1062{
974 struct bam_device *bdev; 1063 struct bam_device *bdev;
1064 const struct of_device_id *match;
975 struct resource *iores; 1065 struct resource *iores;
976 int ret, i; 1066 int ret, i;
977 1067
@@ -981,6 +1071,14 @@ static int bam_dma_probe(struct platform_device *pdev)
981 1071
982 bdev->dev = &pdev->dev; 1072 bdev->dev = &pdev->dev;
983 1073
1074 match = of_match_node(bam_of_match, pdev->dev.of_node);
1075 if (!match) {
1076 dev_err(&pdev->dev, "Unsupported BAM module\n");
1077 return -ENODEV;
1078 }
1079
1080 bdev->layout = match->data;
1081
984 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1082 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
985 bdev->regs = devm_ioremap_resource(&pdev->dev, iores); 1083 bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
986 if (IS_ERR(bdev->regs)) 1084 if (IS_ERR(bdev->regs))
@@ -1084,7 +1182,7 @@ static int bam_dma_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&bdev->common);
 
 	/* mask all interrupts for this execution environment */
-	writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
 
 	devm_free_irq(bdev->dev, bdev->irq, bdev);
 
@@ -1104,18 +1202,11 @@ static int bam_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id bam_of_match[] = {
-	{ .compatible = "qcom,bam-v1.4.0", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, bam_of_match);
-
 static struct platform_driver bam_dma_driver = {
 	.probe = bam_dma_probe,
 	.remove = bam_dma_remove,
 	.driver = {
 		.name = "bam-dma-engine",
-		.owner = THIS_MODULE,
 		.of_match_table = bam_of_match,
 	},
 };
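The qcom_bam_dma change above routes every register access through a bam_addr()-style lookup so one driver can serve both the BAM v1.3.0 and v1.4.0 register maps selected via the of_device_id .data pointer. A minimal sketch of that pattern follows; the names (reg_offset_data, bam_reg) are illustrative and not the driver's exact helper or table layout.

/* Illustrative per-version register layout lookup; not the driver's exact API. */
struct reg_offset_data {
	u32 base_offset;	/* offset of the register block */
	u32 pipe_mult;		/* per-pipe stride, 0 for global registers */
};

static inline void __iomem *bam_reg(void __iomem *regs,
				    const struct reg_offset_data *layout,
				    unsigned int pipe, unsigned int reg)
{
	const struct reg_offset_data *r = &layout[reg];

	return regs + r->base_offset + pipe * r->pipe_mult;
}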
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7416572d1e40..6941a77521c3 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1402,7 +1402,6 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
 static struct platform_driver s3c24xx_dma_driver = {
 	.driver = {
 		.name = "s3c24xx-dma",
-		.owner = THIS_MODULE,
 	},
 	.id_table = s3c24xx_dma_driver_ids,
 	.probe = s3c24xx_dma_probe,
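This is the first of many ".owner = THIS_MODULE" removals in this pull: the platform core already supplies the owner when a driver is registered, so the per-driver assignment is redundant. Sketch of the mechanism, paraphrased from include/linux/platform_device.h and drivers/base/platform.c:

/* Paraphrased: the registration path supplies THIS_MODULE itself. */
#define platform_driver_register(drv) \
	__platform_driver_register(drv, THIS_MODULE)

int __platform_driver_register(struct platform_driver *drv, struct module *owner)
{
	drv->driver.owner = owner;
	/* ... bus setup, probe/remove wrappers ... */
	return driver_register(&drv->driver);
}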
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 4b0ef043729a..2329d295efb5 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -829,7 +829,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 {
 	unsigned i;
 
-	dmadev->chancnt = ARRAY_SIZE(chan_desc);
 	INIT_LIST_HEAD(&dmadev->channels);
 	dmadev->dev = dev;
 	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
@@ -838,7 +837,7 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 	dmadev->device_tx_status = sa11x0_dma_tx_status;
 	dmadev->device_issue_pending = sa11x0_dma_issue_pending;
 
-	for (i = 0; i < dmadev->chancnt; i++) {
+	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
 		struct sa11x0_dma_chan *c;
 
 		c = kzalloc(sizeof(*c), GFP_KERNEL);
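This hunk is part of the chancnt cleanup mentioned in the pull request: the dmaengine core recomputes chancnt from the device's channel list when the device is registered, so drivers should not set it (or loop on it) themselves. A small illustrative sketch, with a hypothetical my_chan type standing in for a driver's channel structure:

/* Illustrative only; the core's bookkeeping lives in drivers/dma/dmaengine.c. */
struct my_chan {
	struct dma_chan chan;		/* hypothetical driver channel */
};

static int register_channels(struct dma_device *dma, struct my_chan *chans, int n)
{
	int i;

	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < n; i++)
		list_add_tail(&chans[i].chan.device_node, &dma->channels);

	/* dma_async_device_register() counts the list into dma->chancnt */
	return dma_async_device_register(dma);
}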
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
index 80fd2aeb4870..d95bbdd721f4 100644
--- a/drivers/dma/sh/rcar-audmapp.c
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -253,7 +253,6 @@ static int audmapp_chan_probe(struct platform_device *pdev,
 
 static void audmapp_chan_remove(struct audmapp_device *audev)
 {
-	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
 	struct shdma_chan *schan;
 	int i;
 
@@ -261,7 +260,6 @@ static void audmapp_chan_remove(struct audmapp_device *audev)
 		BUG_ON(!schan);
 		shdma_chan_remove(schan);
 	}
-	dma_dev->chancnt = 0;
 }
 
 static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
@@ -367,7 +365,6 @@ static struct platform_driver audmapp_driver = {
 	.probe = audmapp_probe,
 	.remove = audmapp_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "rcar-audmapp-engine",
 		.of_match_table = audmapp_of_match,
 	},
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index b212d9471ab5..20a6f6f2a018 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -619,7 +619,6 @@ error:
 
 static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
 {
-	struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
 	struct shdma_chan *schan;
 	int i;
 
@@ -628,7 +627,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
 
 		shdma_chan_remove(schan);
 	}
-	dma_dev->chancnt = 0;
 }
 
 static int hpb_dmae_remove(struct platform_device *pdev)
@@ -655,7 +653,6 @@ static struct platform_driver hpb_dmae_driver = {
 	.remove = hpb_dmae_remove,
 	.shutdown = hpb_dmae_shutdown,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "hpb-dma-engine",
 	},
 };
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 42d497416196..3a2adb131d46 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -391,6 +391,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
 			pm_runtime_put(schan->dev);
 			schan->pm_state = SHDMA_PM_ESTABLISHED;
+		} else if (schan->pm_state == SHDMA_PM_PENDING) {
+			shdma_chan_xfer_ld_queue(schan);
 		}
 	}
 }
@@ -951,7 +953,7 @@ void shdma_chan_probe(struct shdma_dev *sdev,
 	/* Add the channel to DMA device channel list */
 	list_add_tail(&schan->dma_chan.device_node,
 			&sdev->dma_dev.channels);
-	sdev->schan[sdev->dma_dev.chancnt++] = schan;
+	sdev->schan[id] = schan;
 }
 EXPORT_SYMBOL(shdma_chan_probe);
 
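The SHDMA_PM_PENDING branch added above matches the issue_pending expectations clarified elsewhere in this series: if a client called issue_pending() while descriptors were still in flight, the deferred work must be restarted from the completion path rather than silently dropped. An illustrative sketch of that rule, not the shdma code itself:

/* Sketch with placeholder names: re-arm queued work from the completion path. */
enum chan_state { CHAN_ESTABLISHED, CHAN_BUSY, CHAN_PENDING };

struct sketch_chan {
	enum chan_state state;
	void (*kick_queue)(struct sketch_chan *c);	/* hypothetical restart helper */
};

static void sketch_chan_complete(struct sketch_chan *c)
{
	if (c->state == CHAN_PENDING)
		c->kick_queue(c);	/* issue_pending() raced with an active transfer */
	c->state = CHAN_ESTABLISHED;
}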
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index b4ff9d3e56d1..f999f9b0d314 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 
 static struct platform_driver shdma_of = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "shdma-of",
 		.of_match_table = shdma_of_match,
 	},
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 58eb85770eba..b65317c6ea4e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -572,7 +572,6 @@ err_no_irq:
 
 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
 {
-	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
 	struct shdma_chan *schan;
 	int i;
 
@@ -581,7 +580,6 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
 
 		shdma_chan_remove(schan);
 	}
-	dma_dev->chancnt = 0;
 }
 
 static void sh_dmae_shutdown(struct platform_device *pdev)
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 3ce103909896..6da2eaa6c294 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -295,7 +295,6 @@ err_no_irq:
 
 static void sudmac_chan_remove(struct sudmac_device *su_dev)
 {
-	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
 	struct shdma_chan *schan;
 	int i;
 
@@ -304,7 +303,6 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
 
 		shdma_chan_remove(schan);
 	}
-	dma_dev->chancnt = 0;
 }
 
 static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
@@ -411,7 +409,6 @@ static int sudmac_remove(struct platform_device *pdev)
 
 static struct platform_driver sudmac_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = SUDMAC_DRV_NAME,
 	},
 	.probe = sudmac_probe,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index aac03ab10c54..feb1e8ab8d7b 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -735,7 +735,6 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 
 	dma = &sdma->dma;
 	dma->dev = dev;
-	dma->chancnt = SIRFSOC_DMA_CHANNELS;
 
 	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
@@ -752,7 +751,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
 
-	for (i = 0; i < dma->chancnt; i++) {
+	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
 		schan = &sdma->channels[i];
 
 		schan->chan.device = dma;
@@ -835,6 +834,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int sirfsoc_dma_pm_suspend(struct device *dev)
 {
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -916,6 +916,7 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
 
 	return 0;
 }
+#endif
 
 static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
 	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
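The CONFIG_PM_SLEEP guard added above follows the usual pattern for system-sleep handlers: when those handlers are only wired up under CONFIG_PM_SLEEP (for example via SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing otherwise), unguarded definitions produce "defined but not used" warnings in configurations without sleep support. A generic sketch with placeholder names, not the sirf driver's exact wiring:

#ifdef CONFIG_PM_SLEEP
static int foo_pm_suspend(struct device *dev) { return 0; }
static int foo_pm_resume(struct device *dev) { return 0; }
#endif

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_pm_suspend, foo_pm_resume)
};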
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index d9ca3e32d748..4d0710648b08 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3432,6 +3432,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 
 		d40_err(base->dev, "Failed to allocate %d pages.\n",
 			base->lcla_pool.pages);
+		ret = -ENOMEM;
 
 		for (j = 0; j < i; j++)
 			free_pages(page_list[j], base->lcla_pool.pages);
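The ste_dma40 hunk above fixes a classic error-return bug: the failure path freed what it had allocated but never set the return code, so the caller could see success. A minimal illustrative sketch of the bug class (names are hypothetical):

/* Illustrative only: the error path must set ret before unwinding. */
static int alloc_pages_array(unsigned long *pages, int n, int order)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		pages[i] = __get_free_pages(GFP_KERNEL, order);
		if (!pages[i]) {
			ret = -ENOMEM;	/* the line such fixes add */
			while (--i >= 0)
				free_pages(pages[i], order);
			break;
		}
	}
	return ret;
}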
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 91292f5513ff..159f1736a16f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -18,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/slab.h>
@@ -26,24 +27,6 @@
 #include "virt-dma.h"
 
 /*
- * There's 16 physical channels that can work in parallel.
- *
- * However we have 30 different endpoints for our requests.
- *
- * Since the channels are able to handle only an unidirectional
- * transfer, we need to allocate more virtual channels so that
- * everyone can grab one channel.
- *
- * Some devices can't work in both direction (mostly because it
- * wouldn't make sense), so we have a bit fewer virtual channels than
- * 2 channels per endpoints.
- */
-
-#define NR_MAX_CHANNELS		16
-#define NR_MAX_REQUESTS		30
-#define NR_MAX_VCHANS		53
-
-/*
  * Common registers
  */
 #define DMA_IRQ_EN(x)		((x) * 0x04)
@@ -60,6 +43,12 @@
 #define DMA_STAT		0x30
 
 /*
+ * sun8i specific registers
+ */
+#define SUN8I_DMA_GATE		0x20
+#define SUN8I_DMA_GATE_ENABLE	0x4
+
+/*
  * Channels specific registers
  */
 #define DMA_CHAN_ENABLE		0x00
@@ -102,6 +91,19 @@
 #define DRQ_SDRAM	1
 
 /*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun6i_dma_config {
+	u32 nr_max_channels;
+	u32 nr_max_requests;
+	u32 nr_max_vchans;
+};
+
+/*
  * Hardware representation of the LLI
  *
  * The hardware will be fed the physical address of this structure,
@@ -159,6 +161,7 @@ struct sun6i_dma_dev {
 	struct dma_pool		*pool;
 	struct sun6i_pchan	*pchans;
 	struct sun6i_vchan	*vchans;
+	const struct sun6i_dma_config *cfg;
 };
 
 static struct device *chan2dev(struct dma_chan *chan)
@@ -426,6 +429,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 static void sun6i_dma_tasklet(unsigned long data)
 {
 	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+	const struct sun6i_dma_config *cfg = sdev->cfg;
 	struct sun6i_vchan *vchan;
 	struct sun6i_pchan *pchan;
 	unsigned int pchan_alloc = 0;
@@ -453,7 +457,7 @@ static void sun6i_dma_tasklet(unsigned long data)
 	}
 
 	spin_lock_irq(&sdev->lock);
-	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+	for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
 		pchan = &sdev->pchans[pchan_idx];
 
 		if (pchan->vchan || list_empty(&sdev->pending))
@@ -474,7 +478,7 @@ static void sun6i_dma_tasklet(unsigned long data)
 	}
 	spin_unlock_irq(&sdev->lock);
 
-	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+	for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
 		if (!(pchan_alloc & BIT(pchan_idx)))
 			continue;
 
@@ -496,7 +500,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
 	int i, j, ret = IRQ_NONE;
 	u32 status;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
 		status = readl(sdev->base + DMA_IRQ_STAT(i));
 		if (!status)
 			continue;
@@ -506,7 +510,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
 
 		writel(status, sdev->base + DMA_IRQ_STAT(i));
 
-		for (j = 0; (j < 8) && status; j++) {
+		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
 			if (status & DMA_IRQ_QUEUE) {
 				pchan = sdev->pchans + j;
 				vchan = pchan->vchan;
@@ -519,7 +523,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
 				}
 			}
 
-			status = status >> 4;
+			status = status >> DMA_IRQ_CHAN_WIDTH;
 		}
 
 		if (!atomic_read(&sdev->tasklet_shutdown))
@@ -815,7 +819,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
 	struct dma_chan *chan;
 	u8 port = dma_spec->args[0];
 
-	if (port > NR_MAX_REQUESTS)
+	if (port > sdev->cfg->nr_max_requests)
 		return NULL;
 
 	chan = dma_get_any_slave_channel(&sdev->slave);
@@ -848,7 +852,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
 {
 	int i;
 
-	for (i = 0; i < NR_MAX_VCHANS; i++) {
+	for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
 		struct sun6i_vchan *vchan = &sdev->vchans[i];
 
 		list_del(&vchan->vc.chan.device_node);
@@ -856,8 +860,48 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
 	}
 }
 
+/*
+ * For A31:
+ *
+ * There's 16 physical channels that can work in parallel.
+ *
+ * However we have 30 different endpoints for our requests.
+ *
+ * Since the channels are able to handle only an unidirectional
+ * transfer, we need to allocate more virtual channels so that
+ * everyone can grab one channel.
+ *
+ * Some devices can't work in both direction (mostly because it
+ * wouldn't make sense), so we have a bit fewer virtual channels than
+ * 2 channels per endpoints.
+ */
+
+static struct sun6i_dma_config sun6i_a31_dma_cfg = {
+	.nr_max_channels = 16,
+	.nr_max_requests = 30,
+	.nr_max_vchans   = 53,
+};
+
+/*
+ * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
+ * and a total of 37 usable source and destination endpoints.
+ */
+
+static struct sun6i_dma_config sun8i_a23_dma_cfg = {
+	.nr_max_channels = 8,
+	.nr_max_requests = 24,
+	.nr_max_vchans   = 37,
+};
+
+static struct of_device_id sun6i_dma_match[] = {
+	{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
+	{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
+	{ /* sentinel */ }
+};
+
 static int sun6i_dma_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *device;
 	struct sun6i_dma_dev *sdc;
 	struct resource *res;
 	int ret, i;
@@ -866,6 +910,11 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	if (!sdc)
 		return -ENOMEM;
 
+	device = of_match_device(sun6i_dma_match, &pdev->dev);
+	if (!device)
+		return -ENODEV;
+	sdc->cfg = device->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sdc->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(sdc->base))
@@ -912,31 +961,30 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
 	sdc->slave.device_control = sun6i_dma_control;
-	sdc->slave.chancnt = NR_MAX_VCHANS;
 	sdc->slave.copy_align = 4;
 
 	sdc->slave.dev = &pdev->dev;
 
-	sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
+	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
 				   sizeof(struct sun6i_pchan), GFP_KERNEL);
 	if (!sdc->pchans)
 		return -ENOMEM;
 
-	sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
+	sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
 				   sizeof(struct sun6i_vchan), GFP_KERNEL);
 	if (!sdc->vchans)
 		return -ENOMEM;
 
 	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
 
-	for (i = 0; i < NR_MAX_CHANNELS; i++) {
+	for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
 		struct sun6i_pchan *pchan = &sdc->pchans[i];
 
 		pchan->idx = i;
 		pchan->base = sdc->base + 0x100 + i * 0x40;
 	}
 
-	for (i = 0; i < NR_MAX_VCHANS; i++) {
+	for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
 		struct sun6i_vchan *vchan = &sdc->vchans[i];
 
 		INIT_LIST_HEAD(&vchan->node);
@@ -976,6 +1024,15 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 		goto err_dma_unregister;
 	}
 
+	/*
+	 * sun8i variant requires us to toggle a dma gating register,
+	 * as seen in Allwinner's SDK. This register is not documented
+	 * in the A23 user manual.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "allwinner,sun8i-a23-dma"))
+		writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
+
 	return 0;
 
 err_dma_unregister:
@@ -1008,11 +1065,6 @@ static int sun6i_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct of_device_id sun6i_dma_match[] = {
-	{ .compatible = "allwinner,sun6i-a31-dma" },
-	{ /* sentinel */ }
-};
-
 static struct platform_driver sun6i_dma_driver = {
 	.probe = sun6i_dma_probe,
 	.remove = sun6i_dma_remove,
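The sun6i-dma rework above is the standard way to add sun8i (A23) support: channel, request and virtual-channel limits move from compile-time NR_MAX_* constants into a small config struct hung off the matched of_device_id, and every loop reads the limits at probe time. A compact sketch of the pattern, with illustrative names rather than a copy of the driver:

/* Illustrative per-SoC parameterization via of_device_id .data. */
struct soc_dma_cfg {
	u32 nr_channels;
	u32 nr_requests;
};

static const struct soc_dma_cfg a31_cfg = { .nr_channels = 16, .nr_requests = 30 };
static const struct soc_dma_cfg a23_cfg = { .nr_channels = 8,  .nr_requests = 24 };

static const struct of_device_id soc_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma", .data = &a31_cfg },
	{ .compatible = "allwinner,sun8i-a23-dma", .data = &a23_cfg },
	{ /* sentinel */ }
};

static int soc_dma_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(soc_dma_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/* all channel loops take their limits from match->data from here on */
	return 0;
}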
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 1c867d0303db..d8450c3f35f0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1597,7 +1597,6 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
 static struct platform_driver tegra_dmac_driver = {
 	.driver = {
 		.name = "tegra-apbdma",
-		.owner = THIS_MODULE,
 		.pm = &tegra_dma_dev_pm_ops,
 		.of_match_table = tegra_dma_of_match,
 	},
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 4506a7b4f972..2407ccf1a64b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -783,7 +783,6 @@ static int td_remove(struct platform_device *pdev)
 static struct platform_driver td_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 	},
 	.probe = td_probe,
 	.remove = td_remove,
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index a6e64767186e..4a3a8f3137b3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -942,6 +942,9 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	if (!xt->numf || !xt->sgl[0].size)
 		return NULL;
 
+	if (xt->frame_size != 1)
+		return NULL;
+
 	/* Allocate a transaction descriptor. */
 	desc = xilinx_vdma_alloc_tx_descriptor(chan);
 	if (!desc)
@@ -960,7 +963,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	hw = &segment->hw;
 	hw->vsize = xt->numf;
 	hw->hsize = xt->sgl[0].size;
-	hw->stride = xt->sgl[0].icg <<
+	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
 		     XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
 	hw->stride |= chan->config.frm_dly <<
 		      XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
@@ -971,9 +974,11 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	hw->buf_addr = xt->src_start;
 
 	/* Link the previous next descriptor to current */
-	prev = list_last_entry(&desc->segments,
-			       struct xilinx_vdma_tx_segment, node);
-	prev->hw.next_desc = segment->phys;
+	if (!list_empty(&desc->segments)) {
+		prev = list_last_entry(&desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
+		prev->hw.next_desc = segment->phys;
+	}
 
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);
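The xilinx_vdma hunks above reject interleaved templates with more than one chunk per frame, guard the back-link against an empty segment list, and correct the stride programmed into the engine: in the dmaengine interleaved template, sgl[0].icg is the gap between the end of one chunk and the start of the next, so the line-to-line pitch is chunk size plus gap, not the gap alone. A tiny sketch of that arithmetic with hypothetical numbers:

/* Sketch of the stride fix: pitch = chunk size + inter-chunk gap. */
static inline u32 vdma_line_pitch(u32 chunk_size, u32 icg)
{
	return chunk_size + icg;	/* e.g. 7680 + 256 = 7936 bytes per line */
}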