author		Ludovic Desroches <ludovic.desroches@atmel.com>	2014-10-22 11:22:18 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-11-06 00:30:08 -0500
commit		e1f7c9eee70730d7e6ec77f7ecc76f936e262cf0 (patch)
tree		f6171edef13157c8af7a54f58aebe3ae55d24bcd
parent		0df1f2487d2f0d04703f142813d53615d62a1da4 (diff)
dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver
Add a driver for the new Atmel DMA controller, known as XDMAC, introduced
with SAMA5D4 devices.
Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/Kconfig		   7
-rw-r--r--	drivers/dma/Makefile		   1
-rw-r--r--	drivers/dma/at_xdmac.c		1510
-rw-r--r--	include/dt-bindings/dma/at91.h	  25
4 files changed, 1543 insertions(+), 0 deletions(-)
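
For orientation (not part of this patch text): with the one-cell binding implied
by include/dt-bindings/dma/at91.h, the controller and a client would be wired up
in the device tree roughly as sketched below. Node names, addresses, interrupt
specifiers and the "atmel,sama5d4-dma" compatible are illustrative assumptions;
only the "dma_clk" clock name and the AT91_XDMAC_DT_* macros come from this patch.

	dma1: dma-controller@f0004000 {
		compatible = "atmel,sama5d4-dma";	/* assumed compatible */
		reg = <0xf0004000 0x200>;
		interrupts = <50 4 0>;
		#dma-cells = <1>;
		clocks = <&dma1_clk>;
		clock-names = "dma_clk";	/* matches devm_clk_get() in probe */
	};

	/* Client side: the single cell packs the memory interface, the
	 * peripheral interface and the peripheral ID; it is decoded by
	 * at_xdmac_xlate() below. */
	dmas = <&dma1 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
		       | AT91_XDMAC_DT_PERID(13))>;
	dma-names = "tx";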
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de469821bc1b..607271a999a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -107,6 +107,13 @@ config AT_HDMAC
 	help
 	  Support the Atmel AHB DMA controller.
 
+config AT_XDMAC
+	tristate "Atmel XDMA support"
+	depends on (ARCH_AT91 || COMPILE_TEST)
+	select DMA_ENGINE
+	help
+	  Support the Atmel XDMA controller.
+
 config FSL_DMA
 	tristate "Freescale Elo series DMA support"
 	depends on FSL_SOC
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index cb626c179911..2022b5451377 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
+obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE_BASE) += sh/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
new file mode 100644
index 000000000000..4e9b023990ae
--- /dev/null
+++ b/drivers/dma/at_xdmac.c
@@ -0,0 +1,1510 @@
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"
/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
#define AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS		0x2C	/* Global Write Suspend Register */
#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
#define AT_XDMAC_GSWS		0x3C	/* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE		(0x1 << 0)	/* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP		(0x1 << 1)	/* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP		(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of Memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET	11
#define AT_XDMAC_CC_DWIDTH_MASK		(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define AT_XDMAC_CC_DWIDTH_WORD		0x2
#define AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan		chan;
	void __iomem		*ch_regs;
	u32			mask;	/* Channel Mask */
	u32			cfg[3];	/* Channel Configuration Register */
#define	AT_XDMAC_CUR_CFG	0	/* Current channel conf */
#define	AT_XDMAC_DEV_TO_MEM_CFG	1	/* Predefined dev to mem channel conf */
#define	AT_XDMAC_MEM_TO_DEV_CFG	2	/* Predefined mem to dev channel conf */
	u8			perid;	/* Peripheral ID */
	u8			perif;	/* Peripheral Interface */
	u8			memif;	/* Memory Interface */
	u32			per_src_addr;
	u32			per_dst_addr;
	u32			save_cim;
	u32			save_cnda;
	u32			save_cndc;
	unsigned long		status;
	struct tasklet_struct	tasklet;

	spinlock_t		lock;

	struct list_head	xfers_list;
	struct list_head	free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	u32			save_gs;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[0];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	dma_addr_t	mbr_nda;	/* Next Descriptor Member */
	u32		mbr_ubc;	/* Microblock Control Member */
	dma_addr_t	mbr_sa;		/* Source Address Member */
	dma_addr_t	mbr_da;		/* Destination Address Member */
	u32		mbr_cfg;	/* Configuration Register */
};

struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
};

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_csize(u32 maxburst)
{
	int csize;

	csize = ffs(maxburst) - 1;
	if (csize > 4)
		csize = -EINVAL;

	return csize;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

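The two helpers above map dmaengine parameters onto the register encodings:
both the chunk size (CSIZE) and the data width (DWIDTH) fields store a log2
value. A worked example, as an illustrative comment only:

/*
 * maxburst = 16        ->  csize = ffs(16) - 1 = 4  (chunk of 2^4 data)
 * addr_width = 4 bytes ->  dwidth = ffs(4) - 1 = 2  (AT_XDMAC_CC_DWIDTH_WORD)
 * A maxburst above 16 would yield csize > 4 and is rejected with -EINVAL.
 */
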
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);

	/* AT_XDMAC_GS is a global register, read it through the global base. */
	return at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	u32 reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	if (at_xdmac_chan_is_enabled(atchan))
		return;

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing memory to memory transfer we need to use the next
	 * descriptor view 2 since some fields of the configuration register
	 * depend on transfer size and src/dest addresses.
	 */
	if (is_slave_direction(first->direction)) {
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
		if (first->direction == DMA_MEM_TO_DEV)
			atchan->cfg[AT_XDMAC_CUR_CFG] =
				atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
		else
			atchan->cfg[AT_XDMAC_CUR_CFG] =
				atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
		at_xdmac_chan_write(atchan, AT_XDMAC_CC,
				    atchan->cfg[AT_XDMAC_CUR_CFG]);
	} else {
		/*
		 * No need to write the AT_XDMAC_CC reg, it will be done when
		 * the descriptor is fetched.
		 */
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	}

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&atchan->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);
	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	if (list_is_singular(&atchan->xfers_list))
		at_xdmac_start_xfer(atchan, desc);

	spin_unlock_bh(&atchan->lock);
	return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc *desc;
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(*desc));
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		desc->active_xfer = false;
	}

	return desc;
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac *atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan;
	struct device *dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}
479 | |||
480 | chan = dma_get_any_slave_channel(&atxdmac->dma); | ||
481 | if (!chan) { | ||
482 | dev_err(dev, "can't get a dma channel\n"); | ||
483 | return NULL; | ||
484 | } | ||
485 | |||
486 | atchan = to_at_xdmac_chan(chan); | ||
487 | atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); | ||
488 | atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); | ||
489 | atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); | ||
490 | dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n", | ||
491 | atchan->memif, atchan->perif, atchan->perid); | ||
492 | |||
493 | return chan; | ||
494 | } | ||
495 | |||
static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	u8 dwidth;
	int csize;

	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
		AT91_XDMAC_DT_PERID(atchan->perid)
		| AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_FIXED_AM
		| AT_XDMAC_CC_DIF(atchan->memif)
		| AT_XDMAC_CC_SIF(atchan->perif)
		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
		| AT_XDMAC_CC_DSYNC_PER2MEM
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_PER_TRAN;
	csize = at_xdmac_csize(sconfig->src_maxburst);
	if (csize < 0) {
		dev_err(chan2dev(chan), "invalid src maxburst value\n");
		return -EINVAL;
	}
	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
	dwidth = ffs(sconfig->src_addr_width) - 1;
	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);

	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
		AT91_XDMAC_DT_PERID(atchan->perid)
		| AT_XDMAC_CC_DAM_FIXED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_DIF(atchan->perif)
		| AT_XDMAC_CC_SIF(atchan->memif)
		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
		| AT_XDMAC_CC_DSYNC_MEM2PER
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_PER_TRAN;
	csize = at_xdmac_csize(sconfig->dst_maxburst);
	if (csize < 0) {
		dev_err(chan2dev(chan), "invalid dst maxburst value\n");
		return -EINVAL;
	}
	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
	dwidth = ffs(sconfig->dst_addr_width) - 1;
	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);

	/* Src and dst addr are needed to configure the link list descriptor. */
	atchan->per_src_addr = sconfig->src_addr;
	atchan->per_dst_addr = sconfig->dst_addr;

	dev_dbg(chan2dev(chan),
		"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
		__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
		atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
		atchan->per_src_addr, atchan->per_dst_addr);

	return 0;
}
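
For context, a client driver reaches this function through the generic
dmaengine API. A minimal sketch, assuming a made-up peripheral FIFO address;
the dmaengine calls are standard, everything else is illustrative. Note that
this driver derives both directions' configuration from one call, so both
the src_* and dst_* fields should be filled in:

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 16,
		.dst_maxburst   = 16,
		.src_addr       = 0,		/* unused for MEM_TO_DEV */
		.dst_addr       = 0xf8020000,	/* hypothetical peripheral FIFO */
	};

	/* Routed through device_control(DMA_SLAVE_CONFIG, ...) and ends up
	 * in at_xdmac_set_slave_config(). */
	ret = dmaengine_slave_config(chan, &cfg);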

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	struct scatterlist	*sg;
	int			i;
	u32			cfg;
	unsigned int		xfer_size = 0;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect dma_sconfig field that can be modified by set_slave_config. */
	spin_lock_bh(&atchan->lock);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc	*desc = NULL;
		u32			len, mem;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		xfer_size += len;
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			spin_unlock_bh(&atchan->lock);
			return NULL;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_bh(&atchan->lock);
			return NULL;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->per_src_addr;
			desc->lld.mbr_da = mem;
			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->per_dst_addr;
			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
		}
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1		/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN				/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN				/* next descriptor src parameter update */
			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
			| len / (1 << at_xdmac_get_dwidth(cfg));	/* microblock length */
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x\n",
			__func__, desc->lld.mbr_sa, desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev) {
			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
			dev_dbg(chan2dev(chan),
				"%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
				__func__, prev, prev->lld.mbr_nda);
		}

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	spin_unlock_bh(&atchan->lock);

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;	/* total length in bytes, for residue */
	first->direction = direction;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	unsigned int periods = buf_len / period_len;
	int i;
	u32 cfg;

	dev_dbg(chan2dev(chan), "%s: buf_addr=0x%08x, buf_len=%zu, period_len=%zu, dir=%s, flags=0x%lx\n",
		__func__, buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc *desc = NULL;

		spin_lock_bh(&atchan->lock);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_bh(&atchan->lock);
			return NULL;
		}
		spin_unlock_bh(&atchan->lock);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=0x%08x\n",
			__func__, desc, desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->per_src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->per_dst_addr;
			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
		}
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| AT_XDMAC_MBR_UBC_NDE
			| period_len >> at_xdmac_get_dwidth(cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x\n",
			__func__, desc->lld.mbr_sa, desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev) {
			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
			dev_dbg(chan2dev(chan),
				"%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
				__func__, prev, prev->lld.mbr_nda);
		}

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	prev->lld.mbr_nda = first->tx_dma_desc.phys;
	dev_dbg(chan2dev(chan),
		"%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
		__func__, prev, prev->lld.mbr_nda);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}
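
A client requests such a cyclic transfer through the generic dmaengine helper;
a minimal sketch of a ring-buffer setup, where buf_phys, period_len and the
period_done callback are illustrative only:

	struct dma_async_tx_descriptor *txd;

	/* Four periods over one ring buffer; the driver links the last
	 * descriptor back to the first one. */
	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_len,
					period_len, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = period_done;	/* called after every period (BIE) */
	txd->callback_param = dev;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);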

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	size_t remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t src_addr = src, dst_addr = dest;
	u32 dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interface, so we have to use
	 * the same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, which solves the fact that
	 * we don't know the direction.
	 */
	u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_DIF(0)
		| AT_XDMAC_CC_SIF(0)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, len=%zu, flags=0x%lx\n",
		__func__, src, dest, len, flags);

	if (unlikely(!len))
		return NULL;

	/*
	 * Check address alignment to select the greater data width we can use.
	 * Some XDMAC implementations don't provide dword transfer, in this
	 * case selecting dword has the same behavior as selecting word transfers.
	 */
	if (!((src_addr | dst_addr) & 7)) {
		dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!((src_addr | dst_addr) & 3)) {
		dwidth = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!((src_addr | dst_addr) & 1)) {
		dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc *desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_bh(&atchan->lock);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_bh(&atchan->lock);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		if (!((src_addr | dst_addr | xfer_size) & 7)) {
			dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
			dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
		} else if (!((src_addr | dst_addr | xfer_size) & 3)) {
			dwidth = AT_XDMAC_CC_DWIDTH_WORD;
			dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
		} else if (!((src_addr | dst_addr | xfer_size) & 1)) {
			dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
			dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
		} else {
			dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
			dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
		}
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, desc->lld.mbr_sa, desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev) {
			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
			dev_dbg(chan2dev(chan),
				"%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
				__func__, prev, prev->lld.mbr_nda);
		}

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
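
A memcpy offload client typically goes through the device operation (or
whatever wrapper its kernel provides); a minimal sketch with made-up,
already-DMA-mapped addresses:

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
						   len, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);		/* calls at_xdmac_tx_submit() */
	dma_async_issue_pending(chan);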

static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc;
	struct list_head *descs_list;
	enum dma_status ret;
	int residue;
	u32 cur_nda;
	u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_bh(&atchan->lock);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the whole transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		spin_unlock_bh(&atchan->lock);
		return ret;
	}

	residue = desc->xfer_size;
	/* Flush FIFO. */
	at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
	while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
		cpu_relax();

	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
	/*
	 * Remove size of all microblocks already transferred and the current
	 * one. Then add the remaining size to transfer of the current
	 * microblock.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
			break;
	}
	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;

	spin_unlock_bh(&atchan->lock);

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=0x%08x, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, desc->tx_dma_desc.phys, ret, cookie, residue);

	return ret;
}
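
To make the residue arithmetic concrete, a worked example with made-up numbers:
three chained microblocks of 256 words each (word = 4 bytes), with the channel
currently executing the second one (so CNDA already points at the third):

/*
 * xfer_size = 3 * 256 words * 4 bytes                = 3072 bytes
 * loop subtracts microblocks 1 and 2, then breaks:   - 2048 bytes
 * CUBC reports 100 words left in the current one:    +  400 bytes
 * residue reported to the client                     = 1424 bytes
 */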

/* Call must be protected by lock. */
static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
				 struct at_xdmac_desc *desc)
{
	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);

	/*
	 * Remove the transfer from the transfer list then move the transfer
	 * descriptors into the free descriptors list.
	 */
	list_del(&desc->xfer_node);
	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
}

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	spin_lock_bh(&atchan->lock);

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered by the next interrupt.
	 */
	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer)
			at_xdmac_start_xfer(atchan, desc);
	}

	spin_unlock_bh(&atchan->lock);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
	txd = &desc->tx_dma_desc;

	if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
		txd->callback(txd->callback_param);
}

static void at_xdmac_tasklet(unsigned long data)
{
	struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
	struct at_xdmac_desc *desc;
	u32 error_mask;

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
		__func__, atchan->status);

	error_mask = AT_XDMAC_CIS_RBEIS
		     | AT_XDMAC_CIS_WBEIS
		     | AT_XDMAC_CIS_ROIS;

	if (at_xdmac_chan_is_cyclic(atchan)) {
		at_xdmac_handle_cyclic(atchan);
	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
		   || (atchan->status & error_mask)) {
		struct dma_async_tx_descriptor *txd;

		if (atchan->status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
		if (atchan->status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
		if (atchan->status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

		spin_lock_bh(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		BUG_ON(!desc->active_xfer);

		txd = &desc->tx_dma_desc;

		at_xdmac_remove_xfer(atchan, desc);
		spin_unlock_bh(&atchan->lock);

		if (!at_xdmac_chan_is_cyclic(atchan)) {
			dma_cookie_complete(txd);
			if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
				txd->callback(txd->callback_param);
		}

		dma_run_dependencies(txd);

		at_xdmac_advance_work(atchan);
	}
}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan *atchan;
	u32 imr, status, pending;
	u32 chan_imr, chan_status;
	int i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}

	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_advance_work(atchan);
}

static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct at_xdmac_desc *desc, *_desc;
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	int ret = 0;

	dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);

	spin_lock_bh(&atchan->lock);

	switch (cmd) {
	case DMA_PAUSE:
		at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
		set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
		break;

	case DMA_RESUME:
		if (!at_xdmac_chan_is_paused(atchan))
			break;

		at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
		clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
		break;

	case DMA_TERMINATE_ALL:
		at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
		while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
			cpu_relax();

		/* Cancel all pending transfers. */
		list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
			at_xdmac_remove_xfer(atchan, desc);

		clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
		break;

	case DMA_SLAVE_CONFIG:
		ret = at_xdmac_set_slave_config(chan,
				(struct dma_slave_config *)arg);
		break;

	default:
		dev_err(chan2dev(chan),
			"unmanaged or unknown dma control cmd: %d\n", cmd);
		ret = -ENXIO;
	}

	spin_unlock_bh(&atchan->lock);

	return ret;
}

static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;
	int i;

	spin_lock_bh(&atchan->lock);

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		i = -EIO;
		goto spin_unlock;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		i = -EIO;
		goto spin_unlock;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
		if (!desc) {
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

spin_unlock:
	spin_unlock_bh(&atchan->lock);
	return i;
}

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
				      struct dma_slave_caps *caps)
{
	caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	return 0;
}

#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
#else
#	define atmel_xdmac_prepare NULL
#endif

#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_control(chan, DMA_PAUSE, 0);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
1269 | atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); | ||
1270 | atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);	/* restored via GE in resume */ | ||
1271 | at_xdmac_off(atxdmac); | ||
1272 | clk_disable_unprepare(atxdmac->clk); | ||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | static int atmel_xdmac_resume(struct device *dev) | ||
1277 | { | ||
1278 | struct platform_device *pdev = to_platform_device(dev); | ||
1279 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | ||
1280 | struct at_xdmac_chan *atchan; | ||
1281 | struct dma_chan *chan, *_chan; | ||
1282 | int i; | ||
1283 | u32 cfg; | ||
1284 | |||
1285 | clk_prepare_enable(atxdmac->clk); | ||
1286 | |||
1287 | /* Clear pending interrupts. */ | ||
1288 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | ||
1289 | atchan = &atxdmac->chan[i]; | ||
1290 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) | ||
1291 | cpu_relax(); | ||
1292 | } | ||
1293 | |||
1294 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); | ||
1295 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); | ||
1296 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | ||
1297 | atchan = to_at_xdmac_chan(chan); | ||
1298 | cfg = atchan->cfg[AT_XDMAC_CUR_CFG]; | ||
1299 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg); | ||
1300 | if (at_xdmac_chan_is_cyclic(atchan)) { | ||
1301 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); | ||
1302 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); | ||
1303 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); | ||
1304 | wmb();	/* commit CNDA/CNDC/CIE writes before re-enabling via GE */ | ||
1305 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); | ||
1306 | } | ||
1307 | } | ||
1308 | return 0; | ||
1309 | } | ||
1310 | #endif /* CONFIG_PM_SLEEP */ | ||
1311 | |||
1312 | static int at_xdmac_probe(struct platform_device *pdev) | ||
1313 | { | ||
1314 | struct resource *res; | ||
1315 | struct at_xdmac *atxdmac; | ||
1316 | int irq, size, nr_channels, i, ret; | ||
1317 | void __iomem *base; | ||
1318 | u32 reg; | ||
1319 | |||
1320 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1321 | if (!res) | ||
1322 | return -EINVAL; | ||
1323 | |||
1324 | irq = platform_get_irq(pdev, 0); | ||
1325 | if (irq < 0) | ||
1326 | return irq; | ||
1327 | |||
1328 | base = devm_ioremap_resource(&pdev->dev, res); | ||
1329 | if (IS_ERR(base)) | ||
1330 | return PTR_ERR(base); | ||
1331 | |||
1332 | /* | ||
1333 | * Read the number of xdmac channels directly; the read helper can't | ||
1334 | * be used since atxdmac is not yet allocated, and we need the channel | ||
1335 | * count to size that allocation. | ||
1336 | */ | ||
1337 | reg = readl_relaxed(base + AT_XDMAC_GTYPE); | ||
1338 | nr_channels = AT_XDMAC_NB_CH(reg); | ||
1339 | if (nr_channels > AT_XDMAC_MAX_CHAN) { | ||
1340 | dev_err(&pdev->dev, "invalid number of channels (%u)\n", | ||
1341 | nr_channels); | ||
1342 | return -EINVAL; | ||
1343 | } | ||
1344 | |||
1345 | size = sizeof(*atxdmac); | ||
1346 | size += nr_channels * sizeof(struct at_xdmac_chan); | ||
1347 | atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | ||
1348 | if (!atxdmac) { | ||
1349 | dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); | ||
1350 | return -ENOMEM; | ||
1351 | } | ||
1352 | |||
1353 | atxdmac->regs = base; | ||
1354 | atxdmac->irq = irq; | ||
1355 | |||
1356 | atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); | ||
1357 | if (IS_ERR(atxdmac->clk)) { | ||
1358 | dev_err(&pdev->dev, "can't get dma_clk\n"); | ||
1359 | return PTR_ERR(atxdmac->clk); | ||
1360 | } | ||
1361 | |||
1362 | /* Don't use devm for the IRQ: it must be freed before the tasklets are killed to avoid a race in remove(). */ | ||
1363 | ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); | ||
1364 | if (ret) { | ||
1365 | dev_err(&pdev->dev, "can't request irq\n"); | ||
1366 | return ret; | ||
1367 | } | ||
1368 | |||
1369 | ret = clk_prepare_enable(atxdmac->clk); | ||
1370 | if (ret) { | ||
1371 | dev_err(&pdev->dev, "can't prepare or enable clock\n"); | ||
1372 | goto err_free_irq; | ||
1373 | } | ||
1374 | |||
1375 | atxdmac->at_xdmac_desc_pool = | ||
1376 | dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, | ||
1377 | sizeof(struct at_xdmac_desc), 4, 0); | ||
1378 | if (!atxdmac->at_xdmac_desc_pool) { | ||
1379 | dev_err(&pdev->dev, "can't allocate the descriptor DMA pool\n"); | ||
1380 | ret = -ENOMEM; | ||
1381 | goto err_clk_disable; | ||
1382 | } | ||
1383 | |||
1384 | dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); | ||
1385 | dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); | ||
1386 | dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); | ||
1387 | atxdmac->dma.dev = &pdev->dev; | ||
1388 | atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; | ||
1389 | atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; | ||
1390 | atxdmac->dma.device_tx_status = at_xdmac_tx_status; | ||
1391 | atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; | ||
1392 | atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; | ||
1393 | atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; | ||
1394 | atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; | ||
1395 | atxdmac->dma.device_control = at_xdmac_control; | ||
1396 | atxdmac->dma.chancnt = nr_channels; | ||
1397 | atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps; | ||
1398 | |||
1399 | /* Disable all chans and interrupts. */ | ||
1400 | at_xdmac_off(atxdmac); | ||
1401 | |||
1402 | /* Init channels. */ | ||
1403 | INIT_LIST_HEAD(&atxdmac->dma.channels); | ||
1404 | for (i = 0; i < nr_channels; i++) { | ||
1405 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | ||
1406 | |||
1407 | atchan->chan.device = &atxdmac->dma; | ||
1408 | list_add_tail(&atchan->chan.device_node, | ||
1409 | &atxdmac->dma.channels); | ||
1410 | |||
1411 | atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); | ||
1412 | atchan->mask = 1 << i;	/* this channel's bit in GE/GS/GIM/GID */ | ||
1413 | |||
1414 | spin_lock_init(&atchan->lock); | ||
1415 | INIT_LIST_HEAD(&atchan->xfers_list); | ||
1416 | INIT_LIST_HEAD(&atchan->free_descs_list); | ||
1417 | tasklet_init(&atchan->tasklet, at_xdmac_tasklet, | ||
1418 | (unsigned long)atchan); | ||
1419 | |||
1420 | /* Clear pending interrupts. */ | ||
1421 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) | ||
1422 | cpu_relax(); | ||
1423 | } | ||
1424 | platform_set_drvdata(pdev, atxdmac); | ||
1425 | |||
1426 | ret = dma_async_device_register(&atxdmac->dma); | ||
1427 | if (ret) { | ||
1428 | dev_err(&pdev->dev, "failed to register DMA engine device\n"); | ||
1429 | goto err_clk_disable; | ||
1430 | } | ||
1431 | |||
1432 | ret = of_dma_controller_register(pdev->dev.of_node, | ||
1433 | at_xdmac_xlate, atxdmac); | ||
1434 | if (ret) { | ||
1435 | dev_err(&pdev->dev, "could not register OF DMA controller\n"); | ||
1436 | goto err_dma_unregister; | ||
1437 | } | ||
1438 | |||
1439 | dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", | ||
1440 | nr_channels, atxdmac->regs); | ||
1441 | |||
1442 | return 0; | ||
1443 | |||
1444 | err_dma_unregister: | ||
1445 | dma_async_device_unregister(&atxdmac->dma); | ||
1446 | err_clk_disable: | ||
1447 | clk_disable_unprepare(atxdmac->clk); | ||
1448 | err_free_irq: | ||
1449 | free_irq(atxdmac->irq, atxdmac);	/* dev_id must match request_irq() */ | ||
1450 | return ret; | ||
1451 | } | ||
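/*
 * Hypothetical consumer sketch, not part of this patch: how a peripheral
 * driver of this era would obtain and use one of these channels through the
 * generic dmaengine API. 'dev', 'fifo_phys', 'buf_phys' and 'len' are
 * placeholders, not symbols from this driver.
 */
static int sketch_start_tx(struct device *dev, dma_addr_t fifo_phys,
			   dma_addr_t buf_phys, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");	/* matches dma-names */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);
	txd = dmaengine_prep_slave_single(chan, buf_phys, len,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}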
1452 | |||
1453 | static int at_xdmac_remove(struct platform_device *pdev) | ||
1454 | { | ||
1455 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | ||
1456 | int i; | ||
1457 | |||
1458 | at_xdmac_off(atxdmac); | ||
1459 | of_dma_controller_free(pdev->dev.of_node); | ||
1460 | dma_async_device_unregister(&atxdmac->dma); | ||
1461 | clk_disable_unprepare(atxdmac->clk); | ||
1462 | |||
1463 | synchronize_irq(atxdmac->irq); | ||
1464 | |||
1465 | free_irq(atxdmac->irq, atxdmac);	/* dev_id must match request_irq() */ | ||
1466 | |||
1467 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | ||
1468 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | ||
1469 | |||
1470 | tasklet_kill(&atchan->tasklet); | ||
1471 | at_xdmac_free_chan_resources(&atchan->chan); | ||
1472 | } | ||
1473 | |||
1474 | return 0; | ||
1475 | } | ||
1476 | |||
1477 | static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = { | ||
1478 | .prepare = atmel_xdmac_prepare, | ||
1479 | SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume) | ||
1480 | }; | ||
1481 | |||
1482 | static const struct of_device_id atmel_xdmac_dt_ids[] = { | ||
1483 | { | ||
1484 | .compatible = "atmel,sama5d4-dma", | ||
1485 | }, { | ||
1486 | /* sentinel */ | ||
1487 | } | ||
1488 | }; | ||
1489 | MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids); | ||
1490 | |||
1491 | static struct platform_driver at_xdmac_driver = { | ||
1492 | .probe = at_xdmac_probe, | ||
1493 | .remove = at_xdmac_remove, | ||
1494 | .driver = { | ||
1495 | .name = "at_xdmac", | ||
1496 | .owner = THIS_MODULE, | ||
1497 | .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), | ||
1498 | .pm = &atmel_xdmac_dev_pm_ops, | ||
1499 | } | ||
1500 | }; | ||
1501 | |||
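/*
 * Registered via subsys_initcall() rather than module_init() so the DMA
 * controller is up before the peripheral drivers that consume its channels
 * start probing.
 */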
1502 | static int __init at_xdmac_init(void) | ||
1503 | { | ||
1504 | return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe); | ||
1505 | } | ||
1506 | subsys_initcall(at_xdmac_init); | ||
1507 | |||
1508 | MODULE_DESCRIPTION("Atmel Extended DMA Controller driver"); | ||
1509 | MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>"); | ||
1510 | MODULE_LICENSE("GPL"); | ||
diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h index e835037a77b4..ab6cbba45401 100644 --- a/include/dt-bindings/dma/at91.h +++ b/include/dt-bindings/dma/at91.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #ifndef __DT_BINDINGS_AT91_DMA_H__ | 9 | #ifndef __DT_BINDINGS_AT91_DMA_H__ |
10 | #define __DT_BINDINGS_AT91_DMA_H__ | 10 | #define __DT_BINDINGS_AT91_DMA_H__ |
11 | 11 | ||
12 | /* ---------- HDMAC ---------- */ | ||
13 | |||
12 | /* | 14 | /* |
13 | * Source and/or destination peripheral ID | 15 | * Source and/or destination peripheral ID |
14 | */ | 16 | */ |
@@ -24,4 +26,27 @@ | |||
24 | #define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ | 26 | #define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ |
25 | #define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ | 27 | #define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ |
26 | 28 | ||
29 | |||
30 | /* ---------- XDMAC ---------- */ | ||
31 | #define AT91_XDMAC_DT_MEM_IF_MASK (0x1) | ||
32 | #define AT91_XDMAC_DT_MEM_IF_OFFSET (13) | ||
33 | #define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \ | ||
34 | << AT91_XDMAC_DT_MEM_IF_OFFSET) | ||
35 | #define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \ | ||
36 | & AT91_XDMAC_DT_MEM_IF_MASK) | ||
37 | |||
38 | #define AT91_XDMAC_DT_PER_IF_MASK (0x1) | ||
39 | #define AT91_XDMAC_DT_PER_IF_OFFSET (14) | ||
40 | #define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \ | ||
41 | << AT91_XDMAC_DT_PER_IF_OFFSET) | ||
42 | #define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \ | ||
43 | & AT91_XDMAC_DT_PER_IF_MASK) | ||
44 | |||
45 | #define AT91_XDMAC_DT_PERID_MASK (0x7f) | ||
46 | #define AT91_XDMAC_DT_PERID_OFFSET (24) | ||
47 | #define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \ | ||
48 | << AT91_XDMAC_DT_PERID_OFFSET) | ||
49 | #define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \ | ||
50 | & AT91_XDMAC_DT_PERID_MASK) | ||
51 | |||
27 | #endif /* __DT_BINDINGS_AT91_DMA_H__ */ | 52 | #endif /* __DT_BINDINGS_AT91_DMA_H__ */ |
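The three macro families above pack the memory interface, the peripheral interface and the peripheral ID into the single DT cell that at_xdmac_xlate() decodes in the driver. A hypothetical client node (the &dma1 label, the interface numbers and the peripheral ID 24 are illustrative, not taken from this patch) would combine them as:

	dmas = <&dma1 (AT91_XDMAC_DT_MEM_IF(0)
		     | AT91_XDMAC_DT_PER_IF(1)
		     | AT91_XDMAC_DT_PERID(24))>;
	dma-names = "tx";

with the AT91_XDMAC_DT_GET_*() counterparts recovering each field on the driver side.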