author     Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/dma
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/dma')
58 files changed, 3014 insertions, 15462 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d4c12180c65..2e3b3d38c46 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,7 +53,6 @@ config AMBA_PL08X
53 | bool "ARM PrimeCell PL080 or PL081 support" | 53 | bool "ARM PrimeCell PL080 or PL081 support" |
54 | depends on ARM_AMBA && EXPERIMENTAL | 54 | depends on ARM_AMBA && EXPERIMENTAL |
55 | select DMA_ENGINE | 55 | select DMA_ENGINE |
56 | select DMA_VIRTUAL_CHANNELS | ||
57 | help | 56 | help |
58 | Platform has a PL08x DMAC device | 57 | Platform has a PL08x DMAC device |
59 | which can provide DMA engine support | 58 | which can provide DMA engine support |
@@ -90,23 +89,13 @@ config DW_DMAC
90 | Support the Synopsys DesignWare AHB DMA controller. This | 89 | Support the Synopsys DesignWare AHB DMA controller. This |
91 | can be integrated in chips such as the Atmel AT32ap7000. | 90 | can be integrated in chips such as the Atmel AT32ap7000. |
92 | 91 | ||
93 | config DW_DMAC_BIG_ENDIAN_IO | ||
94 | bool "Use big endian I/O register access" | ||
95 | default y if AVR32 | ||
96 | depends on DW_DMAC | ||
97 | help | ||
98 | Say yes here to use big endian I/O access when reading and writing | ||
99 | to the DMA controller registers. This is needed on some platforms, | ||
100 | like the Atmel AVR32 architecture. | ||
101 | |||
102 | If unsure, use the default setting. | ||
103 | |||
104 | config AT_HDMAC | 92 | config AT_HDMAC |
105 | tristate "Atmel AHB DMA support" | 93 | tristate "Atmel AHB DMA support" |
106 | depends on ARCH_AT91 | 94 | depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 |
107 | select DMA_ENGINE | 95 | select DMA_ENGINE |
108 | help | 96 | help |
109 | Support the Atmel AHB DMA controller. | 97 | Support the Atmel AHB DMA controller. This can be integrated in |
98 | chips such as the Atmel AT91SAM9RL. | ||
110 | 99 | ||
111 | config FSL_DMA | 100 | config FSL_DMA |
112 | tristate "Freescale Elo and Elo Plus DMA support" | 101 | tristate "Freescale Elo and Elo Plus DMA support" |
@@ -135,7 +124,7 @@ config MV_XOR
135 | 124 | ||
136 | config MX3_IPU | 125 | config MX3_IPU |
137 | bool "MX3x Image Processing Unit support" | 126 | bool "MX3x Image Processing Unit support" |
138 | depends on ARCH_MXC | 127 | depends on ARCH_MX3 |
139 | select DMA_ENGINE | 128 | select DMA_ENGINE |
140 | default y | 129 | default y |
141 | help | 130 | help |
@@ -160,20 +149,6 @@ config TXX9_DMAC
160 | Support the TXx9 SoC internal DMA controller. This can be | 149 | Support the TXx9 SoC internal DMA controller. This can be |
161 | integrated in chips such as the Toshiba TX4927/38/39. | 150 | integrated in chips such as the Toshiba TX4927/38/39. |
162 | 151 | ||
163 | config TEGRA20_APB_DMA | ||
164 | bool "NVIDIA Tegra20 APB DMA support" | ||
165 | depends on ARCH_TEGRA | ||
166 | select DMA_ENGINE | ||
167 | help | ||
168 | Support for the NVIDIA Tegra20 APB DMA controller driver. The | ||
169 | DMA controller is having multiple DMA channel which can be | ||
170 | configured for different peripherals like audio, UART, SPI, | ||
171 | I2C etc which is in APB bus. | ||
172 | This DMA controller transfers data from memory to peripheral fifo | ||
173 | or vice versa. It does not support memory to memory data transfer. | ||
174 | |||
175 | |||
176 | |||
177 | config SH_DMAE | 152 | config SH_DMAE |
178 | tristate "Renesas SuperH DMAC support" | 153 | tristate "Renesas SuperH DMAC support" |
179 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) | 154 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) |
@@ -212,60 +187,43 @@ config TIMB_DMA
212 | help | 187 | help |
213 | Enable support for the Timberdale FPGA DMA engine. | 188 | Enable support for the Timberdale FPGA DMA engine. |
214 | 189 | ||
215 | config SIRF_DMA | ||
216 | tristate "CSR SiRFprimaII DMA support" | ||
217 | depends on ARCH_PRIMA2 | ||
218 | select DMA_ENGINE | ||
219 | help | ||
220 | Enable support for the CSR SiRFprimaII DMA engine. | ||
221 | |||
222 | config TI_EDMA | ||
223 | tristate "TI EDMA support" | ||
224 | depends on ARCH_DAVINCI | ||
225 | select DMA_ENGINE | ||
226 | select DMA_VIRTUAL_CHANNELS | ||
227 | default n | ||
228 | help | ||
229 | Enable support for the TI EDMA controller. This DMA | ||
230 | engine is found on TI DaVinci and AM33xx parts. | ||
231 | |||
232 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 190 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
233 | bool | 191 | bool |
234 | 192 | ||
235 | config PL330_DMA | 193 | config PL330_DMA |
236 | tristate "DMA API Driver for PL330" | 194 | tristate "DMA API Driver for PL330" |
237 | select DMA_ENGINE | 195 | select DMA_ENGINE |
238 | depends on ARM_AMBA | 196 | depends on PL330 |
239 | help | 197 | help |
240 | Select if your platform has one or more PL330 DMACs. | 198 | Select if your platform has one or more PL330 DMACs. |
241 | You need to provide platform specific settings via | 199 | You need to provide platform specific settings via |
242 | platform_data for a dma-pl330 device. | 200 | platform_data for a dma-pl330 device. |
243 | 201 | ||
244 | config PCH_DMA | 202 | config PCH_DMA |
245 | tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" | 203 | tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support" |
246 | depends on PCI && X86 | 204 | depends on PCI && X86 |
247 | select DMA_ENGINE | 205 | select DMA_ENGINE |
248 | help | 206 | help |
249 | Enable support for Intel EG20T PCH DMA engine. | 207 | Enable support for Intel EG20T PCH DMA engine. |
250 | 208 | ||
251 | This driver also can be used for LAPIS Semiconductor IOH(Input/ | 209 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ |
252 | Output Hub), ML7213, ML7223 and ML7831. | 210 | Output Hub), ML7213 and ML7223. |
253 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is | 211 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is |
254 | for MP(Media Phone) use and ML7831 IOH is for general purpose use. | 212 | for MP(Media Phone) use. |
255 | ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. | 213 | ML7213/ML7223 is companion chip for Intel Atom E6xx series. |
256 | ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. | 214 | ML7213/ML7223 is completely compatible for Intel EG20T PCH. |
257 | 215 | ||
258 | config IMX_SDMA | 216 | config IMX_SDMA |
259 | tristate "i.MX SDMA support" | 217 | tristate "i.MX SDMA support" |
260 | depends on ARCH_MXC | 218 | depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5 |
261 | select DMA_ENGINE | 219 | select DMA_ENGINE |
262 | help | 220 | help |
263 | Support the i.MX SDMA engine. This engine is integrated into | 221 | Support the i.MX SDMA engine. This engine is integrated into |
264 | Freescale i.MX25/31/35/51/53 chips. | 222 | Freescale i.MX25/31/35/51 chips. |
265 | 223 | ||
266 | config IMX_DMA | 224 | config IMX_DMA |
267 | tristate "i.MX DMA support" | 225 | tristate "i.MX DMA support" |
268 | depends on ARCH_MXC | 226 | depends on IMX_HAVE_DMA_V1 |
269 | select DMA_ENGINE | 227 | select DMA_ENGINE |
270 | help | 228 | help |
271 | Support the i.MX DMA engine. This engine is integrated into | 229 | Support the i.MX DMA engine. This engine is integrated into |
@@ -273,8 +231,7 @@ config IMX_DMA
273 | 231 | ||
274 | config MXS_DMA | 232 | config MXS_DMA |
275 | bool "MXS DMA support" | 233 | bool "MXS DMA support" |
276 | depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q | 234 | depends on SOC_IMX23 || SOC_IMX28 |
277 | select STMP_DEVICE | ||
278 | select DMA_ENGINE | 235 | select DMA_ENGINE |
279 | help | 236 | help |
280 | Support the MXS DMA engine. This engine including APBH-DMA | 237 | Support the MXS DMA engine. This engine including APBH-DMA |
@@ -287,45 +244,9 @@ config EP93XX_DMA
287 | help | 244 | help |
288 | Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. | 245 | Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. |
289 | 246 | ||
290 | config DMA_SA11X0 | ||
291 | tristate "SA-11x0 DMA support" | ||
292 | depends on ARCH_SA1100 | ||
293 | select DMA_ENGINE | ||
294 | select DMA_VIRTUAL_CHANNELS | ||
295 | help | ||
296 | Support the DMA engine found on Intel StrongARM SA-1100 and | ||
297 | SA-1110 SoCs. This DMA engine can only be used with on-chip | ||
298 | devices. | ||
299 | |||
300 | config MMP_TDMA | ||
301 | bool "MMP Two-Channel DMA support" | ||
302 | depends on ARCH_MMP | ||
303 | select DMA_ENGINE | ||
304 | help | ||
305 | Support the MMP Two-Channel DMA engine. | ||
306 | This engine used for MMP Audio DMA and pxa910 SQU. | ||
307 | |||
308 | Say Y here if you enabled MMP ADMA, otherwise say N. | ||
309 | |||
310 | config DMA_OMAP | ||
311 | tristate "OMAP DMA support" | ||
312 | depends on ARCH_OMAP | ||
313 | select DMA_ENGINE | ||
314 | select DMA_VIRTUAL_CHANNELS | ||
315 | |||
316 | config MMP_PDMA | ||
317 | bool "MMP PDMA support" | ||
318 | depends on (ARCH_MMP || ARCH_PXA) | ||
319 | select DMA_ENGINE | ||
320 | help | ||
321 | Support the MMP PDMA engine for PXA and MMP platfrom. | ||
322 | |||
323 | config DMA_ENGINE | 247 | config DMA_ENGINE |
324 | bool | 248 | bool |
325 | 249 | ||
326 | config DMA_VIRTUAL_CHANNELS | ||
327 | tristate | ||
328 | |||
329 | comment "DMA Clients" | 250 | comment "DMA Clients" |
330 | depends on DMA_ENGINE | 251 | depends on DMA_ENGINE |
331 | 252 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7428feaa870..30cf3b1f0c5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,7 +2,6 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
2 | ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG | 2 | ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG |
3 | 3 | ||
4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o | ||
6 | obj-$(CONFIG_NET_DMA) += iovlock.o | 5 | obj-$(CONFIG_NET_DMA) += iovlock.o |
7 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o | 6 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o |
8 | obj-$(CONFIG_DMATEST) += dmatest.o | 7 | obj-$(CONFIG_DMATEST) += dmatest.o |
@@ -15,22 +14,15 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
15 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 14 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
16 | obj-$(CONFIG_MX3_IPU) += ipu/ | 15 | obj-$(CONFIG_MX3_IPU) += ipu/ |
17 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 16 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
18 | obj-$(CONFIG_SH_DMAE) += sh/ | 17 | obj-$(CONFIG_SH_DMAE) += shdma.o |
19 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o | 18 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o |
20 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 19 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
21 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 20 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
22 | obj-$(CONFIG_IMX_DMA) += imx-dma.o | 21 | obj-$(CONFIG_IMX_DMA) += imx-dma.o |
23 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o | 22 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o |
24 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 23 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
25 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | ||
26 | obj-$(CONFIG_TI_EDMA) += edma.o | ||
27 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 24 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
28 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | ||
29 | obj-$(CONFIG_PL330_DMA) += pl330.o | 25 | obj-$(CONFIG_PL330_DMA) += pl330.o |
30 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 26 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
31 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 27 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
32 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | 28 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o |
33 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o | ||
34 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o | ||
35 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o | ||
36 | obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o | ||
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index d1cc5791476..be21e3f138a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,45 +66,40 @@
66 | * after the final transfer signalled by LBREQ or LSREQ. The DMAC | 66 | * after the final transfer signalled by LBREQ or LSREQ. The DMAC |
67 | * will then move to the next LLI entry. | 67 | * will then move to the next LLI entry. |
68 | * | 68 | * |
69 | * Only the former works sanely with scatter lists, so we only implement | ||
70 | * the DMAC flow control method. However, peripherals which use the LBREQ | ||
71 | * and LSREQ signals (eg, MMCI) are unable to use this mode, which through | ||
72 | * these hardware restrictions prevents them from using scatter DMA. | ||
73 | * | ||
69 | * Global TODO: | 74 | * Global TODO: |
70 | * - Break out common code from arch/arm/mach-s3c64xx and share | 75 | * - Break out common code from arch/arm/mach-s3c64xx and share |
71 | */ | 76 | */ |
72 | #include <linux/amba/bus.h> | ||
73 | #include <linux/amba/pl08x.h> | ||
74 | #include <linux/debugfs.h> | ||
75 | #include <linux/delay.h> | ||
76 | #include <linux/device.h> | 77 | #include <linux/device.h> |
77 | #include <linux/dmaengine.h> | ||
78 | #include <linux/dmapool.h> | ||
79 | #include <linux/dma-mapping.h> | ||
80 | #include <linux/init.h> | 78 | #include <linux/init.h> |
81 | #include <linux/interrupt.h> | ||
82 | #include <linux/module.h> | 79 | #include <linux/module.h> |
83 | #include <linux/pm_runtime.h> | 80 | #include <linux/interrupt.h> |
84 | #include <linux/seq_file.h> | ||
85 | #include <linux/slab.h> | 81 | #include <linux/slab.h> |
86 | #include <asm/hardware/pl080.h> | 82 | #include <linux/delay.h> |
83 | #include <linux/dma-mapping.h> | ||
84 | #include <linux/dmapool.h> | ||
85 | #include <linux/dmaengine.h> | ||
86 | #include <linux/amba/bus.h> | ||
87 | #include <linux/amba/pl08x.h> | ||
88 | #include <linux/debugfs.h> | ||
89 | #include <linux/seq_file.h> | ||
87 | 90 | ||
88 | #include "dmaengine.h" | 91 | #include <asm/hardware/pl080.h> |
89 | #include "virt-dma.h" | ||
90 | 92 | ||
91 | #define DRIVER_NAME "pl08xdmac" | 93 | #define DRIVER_NAME "pl08xdmac" |
92 | 94 | ||
93 | static struct amba_driver pl08x_amba_driver; | ||
94 | struct pl08x_driver_data; | ||
95 | |||
96 | /** | 95 | /** |
97 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives | 96 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives |
98 | * @channels: the number of channels available in this variant | 97 | * @channels: the number of channels available in this variant |
99 | * @dualmaster: whether this version supports dual AHB masters or not. | 98 | * @dualmaster: whether this version supports dual AHB masters or not. |
100 | * @nomadik: whether the channels have Nomadik security extension bits | ||
101 | * that need to be checked for permission before use and some registers are | ||
102 | * missing | ||
103 | */ | 99 | */ |
104 | struct vendor_data { | 100 | struct vendor_data { |
105 | u8 channels; | 101 | u8 channels; |
106 | bool dualmaster; | 102 | bool dualmaster; |
107 | bool nomadik; | ||
108 | }; | 103 | }; |
109 | 104 | ||
110 | /* | 105 | /* |
@@ -121,123 +116,6 @@ struct pl08x_lli {
121 | }; | 116 | }; |
122 | 117 | ||
123 | /** | 118 | /** |
124 | * struct pl08x_bus_data - information of source or destination | ||
125 | * busses for a transfer | ||
126 | * @addr: current address | ||
127 | * @maxwidth: the maximum width of a transfer on this bus | ||
128 | * @buswidth: the width of this bus in bytes: 1, 2 or 4 | ||
129 | */ | ||
130 | struct pl08x_bus_data { | ||
131 | dma_addr_t addr; | ||
132 | u8 maxwidth; | ||
133 | u8 buswidth; | ||
134 | }; | ||
135 | |||
136 | /** | ||
137 | * struct pl08x_phy_chan - holder for the physical channels | ||
138 | * @id: physical index to this channel | ||
139 | * @lock: a lock to use when altering an instance of this struct | ||
140 | * @serving: the virtual channel currently being served by this physical | ||
141 | * channel | ||
142 | * @locked: channel unavailable for the system, e.g. dedicated to secure | ||
143 | * world | ||
144 | */ | ||
145 | struct pl08x_phy_chan { | ||
146 | unsigned int id; | ||
147 | void __iomem *base; | ||
148 | spinlock_t lock; | ||
149 | struct pl08x_dma_chan *serving; | ||
150 | bool locked; | ||
151 | }; | ||
152 | |||
153 | /** | ||
154 | * struct pl08x_sg - structure containing data per sg | ||
155 | * @src_addr: src address of sg | ||
156 | * @dst_addr: dst address of sg | ||
157 | * @len: transfer len in bytes | ||
158 | * @node: node for txd's dsg_list | ||
159 | */ | ||
160 | struct pl08x_sg { | ||
161 | dma_addr_t src_addr; | ||
162 | dma_addr_t dst_addr; | ||
163 | size_t len; | ||
164 | struct list_head node; | ||
165 | }; | ||
166 | |||
167 | /** | ||
168 | * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor | ||
169 | * @vd: virtual DMA descriptor | ||
170 | * @dsg_list: list of children sg's | ||
171 | * @llis_bus: DMA memory address (physical) start for the LLIs | ||
172 | * @llis_va: virtual memory address start for the LLIs | ||
173 | * @cctl: control reg values for current txd | ||
174 | * @ccfg: config reg values for current txd | ||
175 | * @done: this marks completed descriptors, which should not have their | ||
176 | * mux released. | ||
177 | */ | ||
178 | struct pl08x_txd { | ||
179 | struct virt_dma_desc vd; | ||
180 | struct list_head dsg_list; | ||
181 | dma_addr_t llis_bus; | ||
182 | struct pl08x_lli *llis_va; | ||
183 | /* Default cctl value for LLIs */ | ||
184 | u32 cctl; | ||
185 | /* | ||
186 | * Settings to be put into the physical channel when we | ||
187 | * trigger this txd. Other registers are in llis_va[0]. | ||
188 | */ | ||
189 | u32 ccfg; | ||
190 | bool done; | ||
191 | }; | ||
192 | |||
193 | /** | ||
194 | * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel | ||
195 | * states | ||
196 | * @PL08X_CHAN_IDLE: the channel is idle | ||
197 | * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport | ||
198 | * channel and is running a transfer on it | ||
199 | * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport | ||
200 | * channel, but the transfer is currently paused | ||
201 | * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport | ||
202 | * channel to become available (only pertains to memcpy channels) | ||
203 | */ | ||
204 | enum pl08x_dma_chan_state { | ||
205 | PL08X_CHAN_IDLE, | ||
206 | PL08X_CHAN_RUNNING, | ||
207 | PL08X_CHAN_PAUSED, | ||
208 | PL08X_CHAN_WAITING, | ||
209 | }; | ||
210 | |||
211 | /** | ||
212 | * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel | ||
213 | * @vc: wrappped virtual channel | ||
214 | * @phychan: the physical channel utilized by this channel, if there is one | ||
215 | * @name: name of channel | ||
216 | * @cd: channel platform data | ||
217 | * @runtime_addr: address for RX/TX according to the runtime config | ||
218 | * @at: active transaction on this channel | ||
219 | * @lock: a lock for this channel data | ||
220 | * @host: a pointer to the host (internal use) | ||
221 | * @state: whether the channel is idle, paused, running etc | ||
222 | * @slave: whether this channel is a device (slave) or for memcpy | ||
223 | * @signal: the physical DMA request signal which this channel is using | ||
224 | * @mux_use: count of descriptors using this DMA request signal setting | ||
225 | */ | ||
226 | struct pl08x_dma_chan { | ||
227 | struct virt_dma_chan vc; | ||
228 | struct pl08x_phy_chan *phychan; | ||
229 | const char *name; | ||
230 | const struct pl08x_channel_data *cd; | ||
231 | struct dma_slave_config cfg; | ||
232 | struct pl08x_txd *at; | ||
233 | struct pl08x_driver_data *host; | ||
234 | enum pl08x_dma_chan_state state; | ||
235 | bool slave; | ||
236 | int signal; | ||
237 | unsigned mux_use; | ||
238 | }; | ||
239 | |||
240 | /** | ||
241 | * struct pl08x_driver_data - the local state holder for the PL08x | 119 | * struct pl08x_driver_data - the local state holder for the PL08x |
242 | * @slave: slave engine for this instance | 120 | * @slave: slave engine for this instance |
243 | * @memcpy: memcpy engine for this instance | 121 | * @memcpy: memcpy engine for this instance |
@@ -247,8 +125,8 @@ struct pl08x_dma_chan {
247 | * @pd: platform data passed in from the platform/machine | 125 | * @pd: platform data passed in from the platform/machine |
248 | * @phy_chans: array of data for the physical channels | 126 | * @phy_chans: array of data for the physical channels |
249 | * @pool: a pool for the LLI descriptors | 127 | * @pool: a pool for the LLI descriptors |
250 | * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI | 128 | * @pool_ctr: counter of LLIs in the pool |
251 | * fetches | 129 | * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches |
252 | * @mem_buses: set to indicate memory transfers on AHB2. | 130 | * @mem_buses: set to indicate memory transfers on AHB2. |
253 | * @lock: a spinlock for this struct | 131 | * @lock: a spinlock for this struct |
254 | */ | 132 | */ |
@@ -261,14 +139,24 @@ struct pl08x_driver_data {
261 | struct pl08x_platform_data *pd; | 139 | struct pl08x_platform_data *pd; |
262 | struct pl08x_phy_chan *phy_chans; | 140 | struct pl08x_phy_chan *phy_chans; |
263 | struct dma_pool *pool; | 141 | struct dma_pool *pool; |
142 | int pool_ctr; | ||
264 | u8 lli_buses; | 143 | u8 lli_buses; |
265 | u8 mem_buses; | 144 | u8 mem_buses; |
145 | spinlock_t lock; | ||
266 | }; | 146 | }; |
267 | 147 | ||
268 | /* | 148 | /* |
269 | * PL08X specific defines | 149 | * PL08X specific defines |
270 | */ | 150 | */ |
271 | 151 | ||
152 | /* | ||
153 | * Memory boundaries: the manual for PL08x says that the controller | ||
154 | * cannot read past a 1KiB boundary, so these defines are used to | ||
155 | * create transfer LLIs that do not cross such boundaries. | ||
156 | */ | ||
157 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | ||
158 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | ||
159 | |||
272 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | 160 | /* Size (bytes) of each LLI buffer allocated for one transfer */ |
273 | # define PL08X_LLI_TSFR_SIZE 0x2000 | 161 | # define PL08X_LLI_TSFR_SIZE 0x2000 |
274 | 162 | ||
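The new-side comment in the hunk above records that the PL08x cannot read past a 1 KiB boundary, which is why PL08X_BOUNDARY_SHIFT/PL08X_BOUNDARY_SIZE exist and why the pl08x_pre_boundary() helper later in this diff caps each LLI at the next boundary. As a standalone illustration only, not the driver's code, here is a minimal C sketch of that bytes-to-boundary calculation, assuming the same 1 KiB (0x400) boundary:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BOUNDARY_SHIFT 10                       /* 1 KiB, mirroring PL08X_BOUNDARY_SHIFT */
#define BOUNDARY_SIZE  (1u << BOUNDARY_SHIFT)   /* 0x400 */

/* Bytes transferable from 'addr' before the next 1 KiB boundary, capped at 'len'. */
static size_t bytes_to_boundary(uint32_t addr, size_t len)
{
	size_t boundary_len = BOUNDARY_SIZE - (addr & (BOUNDARY_SIZE - 1));

	return boundary_len < len ? boundary_len : len;
}

int main(void)
{
	/* 0x3f0 sits 16 bytes below the 0x400 boundary, so a 100-byte request
	 * is split: 16 bytes now, the remainder goes into a following LLI. */
	printf("%zu\n", bytes_to_boundary(0x3f0, 100));  /* 16  */
	printf("%zu\n", bytes_to_boundary(0x400, 100));  /* 100 */
	return 0;
}
```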
@@ -278,51 +166,12 @@ struct pl08x_driver_data {
278 | 166 | ||
279 | static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) | 167 | static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) |
280 | { | 168 | { |
281 | return container_of(chan, struct pl08x_dma_chan, vc.chan); | 169 | return container_of(chan, struct pl08x_dma_chan, chan); |
282 | } | 170 | } |
283 | 171 | ||
284 | static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) | 172 | static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) |
285 | { | 173 | { |
286 | return container_of(tx, struct pl08x_txd, vd.tx); | 174 | return container_of(tx, struct pl08x_txd, tx); |
287 | } | ||
288 | |||
289 | /* | ||
290 | * Mux handling. | ||
291 | * | ||
292 | * This gives us the DMA request input to the PL08x primecell which the | ||
293 | * peripheral described by the channel data will be routed to, possibly | ||
294 | * via a board/SoC specific external MUX. One important point to note | ||
295 | * here is that this does not depend on the physical channel. | ||
296 | */ | ||
297 | static int pl08x_request_mux(struct pl08x_dma_chan *plchan) | ||
298 | { | ||
299 | const struct pl08x_platform_data *pd = plchan->host->pd; | ||
300 | int ret; | ||
301 | |||
302 | if (plchan->mux_use++ == 0 && pd->get_signal) { | ||
303 | ret = pd->get_signal(plchan->cd); | ||
304 | if (ret < 0) { | ||
305 | plchan->mux_use = 0; | ||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | plchan->signal = ret; | ||
310 | } | ||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static void pl08x_release_mux(struct pl08x_dma_chan *plchan) | ||
315 | { | ||
316 | const struct pl08x_platform_data *pd = plchan->host->pd; | ||
317 | |||
318 | if (plchan->signal >= 0) { | ||
319 | WARN_ON(plchan->mux_use == 0); | ||
320 | |||
321 | if (--plchan->mux_use == 0 && pd->put_signal) { | ||
322 | pd->put_signal(plchan->cd, plchan->signal); | ||
323 | plchan->signal = -1; | ||
324 | } | ||
325 | } | ||
326 | } | 175 | } |
327 | 176 | ||
328 | /* | 177 | /* |
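Both columns of the hunk above implement to_pl08x_chan()/to_pl08x_txd() with the kernel's container_of() idiom: from a pointer to an embedded member, recover the structure that contains it. A minimal userspace sketch of the same idiom, with illustrative struct names and container_of() spelled out via offsetof():

```c
#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel macro: member pointer -> pointer to enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan { int id; };          /* stand-in for the generic dmaengine channel */

struct my_dma_chan {                  /* stand-in for struct pl08x_dma_chan */
	const char *name;
	struct dma_chan chan;         /* generic channel embedded in the driver's struct */
};

static struct my_dma_chan *to_my_chan(struct dma_chan *chan)
{
	return container_of(chan, struct my_dma_chan, chan);
}

int main(void)
{
	struct my_dma_chan mine = { .name = "dma0chan0", .chan = { .id = 7 } };
	struct dma_chan *generic = &mine.chan;       /* what the dmaengine core passes around */

	printf("%s\n", to_my_chan(generic)->name);   /* dma0chan0 */
	return 0;
}
```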
@@ -344,25 +193,20 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
344 | * been set when the LLIs were constructed. Poke them into the hardware | 193 | * been set when the LLIs were constructed. Poke them into the hardware |
345 | * and start the transfer. | 194 | * and start the transfer. |
346 | */ | 195 | */ |
347 | static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) | 196 | static void pl08x_start_txd(struct pl08x_dma_chan *plchan, |
197 | struct pl08x_txd *txd) | ||
348 | { | 198 | { |
349 | struct pl08x_driver_data *pl08x = plchan->host; | 199 | struct pl08x_driver_data *pl08x = plchan->host; |
350 | struct pl08x_phy_chan *phychan = plchan->phychan; | 200 | struct pl08x_phy_chan *phychan = plchan->phychan; |
351 | struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); | 201 | struct pl08x_lli *lli = &txd->llis_va[0]; |
352 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); | ||
353 | struct pl08x_lli *lli; | ||
354 | u32 val; | 202 | u32 val; |
355 | 203 | ||
356 | list_del(&txd->vd.node); | ||
357 | |||
358 | plchan->at = txd; | 204 | plchan->at = txd; |
359 | 205 | ||
360 | /* Wait for channel inactive */ | 206 | /* Wait for channel inactive */ |
361 | while (pl08x_phy_channel_busy(phychan)) | 207 | while (pl08x_phy_channel_busy(phychan)) |
362 | cpu_relax(); | 208 | cpu_relax(); |
363 | 209 | ||
364 | lli = &txd->llis_va[0]; | ||
365 | |||
366 | dev_vdbg(&pl08x->adev->dev, | 210 | dev_vdbg(&pl08x->adev->dev, |
367 | "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " | 211 | "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " |
368 | "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", | 212 | "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", |
@@ -428,6 +272,7 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
428 | writel(val, ch->base + PL080_CH_CONFIG); | 272 | writel(val, ch->base + PL080_CH_CONFIG); |
429 | } | 273 | } |
430 | 274 | ||
275 | |||
431 | /* | 276 | /* |
432 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and | 277 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and |
433 | * clears any pending interrupt status. This should not be used for | 278 | * clears any pending interrupt status. This should not be used for |
@@ -471,8 +316,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
471 | { | 316 | { |
472 | struct pl08x_phy_chan *ch; | 317 | struct pl08x_phy_chan *ch; |
473 | struct pl08x_txd *txd; | 318 | struct pl08x_txd *txd; |
319 | unsigned long flags; | ||
474 | size_t bytes = 0; | 320 | size_t bytes = 0; |
475 | 321 | ||
322 | spin_lock_irqsave(&plchan->lock, flags); | ||
476 | ch = plchan->phychan; | 323 | ch = plchan->phychan; |
477 | txd = plchan->at; | 324 | txd = plchan->at; |
478 | 325 | ||
@@ -512,6 +359,16 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
512 | } | 359 | } |
513 | } | 360 | } |
514 | 361 | ||
362 | /* Sum up all queued transactions */ | ||
363 | if (!list_empty(&plchan->pend_list)) { | ||
364 | struct pl08x_txd *txdi; | ||
365 | list_for_each_entry(txdi, &plchan->pend_list, node) { | ||
366 | bytes += txdi->len; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
371 | |||
515 | return bytes; | 372 | return bytes; |
516 | } | 373 | } |
517 | 374 | ||
@@ -535,8 +392,9 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
535 | 392 | ||
536 | spin_lock_irqsave(&ch->lock, flags); | 393 | spin_lock_irqsave(&ch->lock, flags); |
537 | 394 | ||
538 | if (!ch->locked && !ch->serving) { | 395 | if (!ch->serving) { |
539 | ch->serving = virt_chan; | 396 | ch->serving = virt_chan; |
397 | ch->signal = -1; | ||
540 | spin_unlock_irqrestore(&ch->lock, flags); | 398 | spin_unlock_irqrestore(&ch->lock, flags); |
541 | break; | 399 | break; |
542 | } | 400 | } |
@@ -552,111 +410,19 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
552 | return ch; | 410 | return ch; |
553 | } | 411 | } |
554 | 412 | ||
555 | /* Mark the physical channel as free. Note, this write is atomic. */ | ||
556 | static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, | 413 | static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, |
557 | struct pl08x_phy_chan *ch) | 414 | struct pl08x_phy_chan *ch) |
558 | { | 415 | { |
559 | ch->serving = NULL; | 416 | unsigned long flags; |
560 | } | ||
561 | |||
562 | /* | ||
563 | * Try to allocate a physical channel. When successful, assign it to | ||
564 | * this virtual channel, and initiate the next descriptor. The | ||
565 | * virtual channel lock must be held at this point. | ||
566 | */ | ||
567 | static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) | ||
568 | { | ||
569 | struct pl08x_driver_data *pl08x = plchan->host; | ||
570 | struct pl08x_phy_chan *ch; | ||
571 | |||
572 | ch = pl08x_get_phy_channel(pl08x, plchan); | ||
573 | if (!ch) { | ||
574 | dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); | ||
575 | plchan->state = PL08X_CHAN_WAITING; | ||
576 | return; | ||
577 | } | ||
578 | |||
579 | dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", | ||
580 | ch->id, plchan->name); | ||
581 | |||
582 | plchan->phychan = ch; | ||
583 | plchan->state = PL08X_CHAN_RUNNING; | ||
584 | pl08x_start_next_txd(plchan); | ||
585 | } | ||
586 | |||
587 | static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, | ||
588 | struct pl08x_dma_chan *plchan) | ||
589 | { | ||
590 | struct pl08x_driver_data *pl08x = plchan->host; | ||
591 | |||
592 | dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", | ||
593 | ch->id, plchan->name); | ||
594 | |||
595 | /* | ||
596 | * We do this without taking the lock; we're really only concerned | ||
597 | * about whether this pointer is NULL or not, and we're guaranteed | ||
598 | * that this will only be called when it _already_ is non-NULL. | ||
599 | */ | ||
600 | ch->serving = plchan; | ||
601 | plchan->phychan = ch; | ||
602 | plchan->state = PL08X_CHAN_RUNNING; | ||
603 | pl08x_start_next_txd(plchan); | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | * Free a physical DMA channel, potentially reallocating it to another | ||
608 | * virtual channel if we have any pending. | ||
609 | */ | ||
610 | static void pl08x_phy_free(struct pl08x_dma_chan *plchan) | ||
611 | { | ||
612 | struct pl08x_driver_data *pl08x = plchan->host; | ||
613 | struct pl08x_dma_chan *p, *next; | ||
614 | |||
615 | retry: | ||
616 | next = NULL; | ||
617 | |||
618 | /* Find a waiting virtual channel for the next transfer. */ | ||
619 | list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) | ||
620 | if (p->state == PL08X_CHAN_WAITING) { | ||
621 | next = p; | ||
622 | break; | ||
623 | } | ||
624 | |||
625 | if (!next) { | ||
626 | list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) | ||
627 | if (p->state == PL08X_CHAN_WAITING) { | ||
628 | next = p; | ||
629 | break; | ||
630 | } | ||
631 | } | ||
632 | |||
633 | /* Ensure that the physical channel is stopped */ | ||
634 | pl08x_terminate_phy_chan(pl08x, plchan->phychan); | ||
635 | 417 | ||
636 | if (next) { | 418 | spin_lock_irqsave(&ch->lock, flags); |
637 | bool success; | ||
638 | 419 | ||
639 | /* | 420 | /* Stop the channel and clear its interrupts */ |
640 | * Eww. We know this isn't going to deadlock | 421 | pl08x_terminate_phy_chan(pl08x, ch); |
641 | * but lockdep probably doesn't. | ||
642 | */ | ||
643 | spin_lock(&next->vc.lock); | ||
644 | /* Re-check the state now that we have the lock */ | ||
645 | success = next->state == PL08X_CHAN_WAITING; | ||
646 | if (success) | ||
647 | pl08x_phy_reassign_start(plchan->phychan, next); | ||
648 | spin_unlock(&next->vc.lock); | ||
649 | |||
650 | /* If the state changed, try to find another channel */ | ||
651 | if (!success) | ||
652 | goto retry; | ||
653 | } else { | ||
654 | /* No more jobs, so free up the physical channel */ | ||
655 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
656 | } | ||
657 | 422 | ||
658 | plchan->phychan = NULL; | 423 | /* Mark it as free */ |
659 | plchan->state = PL08X_CHAN_IDLE; | 424 | ch->serving = NULL; |
425 | spin_unlock_irqrestore(&ch->lock, flags); | ||
660 | } | 426 | } |
661 | 427 | ||
662 | /* | 428 | /* |
@@ -733,30 +499,36 @@ struct pl08x_lli_build_data {
733 | }; | 499 | }; |
734 | 500 | ||
735 | /* | 501 | /* |
736 | * Autoselect a master bus to use for the transfer. Slave will be the chosen as | 502 | * Autoselect a master bus to use for the transfer this prefers the |
737 | * victim in case src & dest are not similarly aligned. i.e. If after aligning | 503 | * destination bus if both available if fixed address on one bus the |
738 | * masters address with width requirements of transfer (by sending few byte by | 504 | * other will be chosen |
739 | * byte data), slave is still not aligned, then its width will be reduced to | ||
740 | * BYTE. | ||
741 | * - prefers the destination bus if both available | ||
742 | * - prefers bus with fixed address (i.e. peripheral) | ||
743 | */ | 505 | */ |
744 | static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, | 506 | static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, |
745 | struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) | 507 | struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) |
746 | { | 508 | { |
747 | if (!(cctl & PL080_CONTROL_DST_INCR)) { | 509 | if (!(cctl & PL080_CONTROL_DST_INCR)) { |
748 | *mbus = &bd->dstbus; | ||
749 | *sbus = &bd->srcbus; | ||
750 | } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { | ||
751 | *mbus = &bd->srcbus; | 510 | *mbus = &bd->srcbus; |
752 | *sbus = &bd->dstbus; | 511 | *sbus = &bd->dstbus; |
512 | } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { | ||
513 | *mbus = &bd->dstbus; | ||
514 | *sbus = &bd->srcbus; | ||
753 | } else { | 515 | } else { |
754 | if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { | 516 | if (bd->dstbus.buswidth == 4) { |
755 | *mbus = &bd->dstbus; | 517 | *mbus = &bd->dstbus; |
756 | *sbus = &bd->srcbus; | 518 | *sbus = &bd->srcbus; |
757 | } else { | 519 | } else if (bd->srcbus.buswidth == 4) { |
520 | *mbus = &bd->srcbus; | ||
521 | *sbus = &bd->dstbus; | ||
522 | } else if (bd->dstbus.buswidth == 2) { | ||
523 | *mbus = &bd->dstbus; | ||
524 | *sbus = &bd->srcbus; | ||
525 | } else if (bd->srcbus.buswidth == 2) { | ||
758 | *mbus = &bd->srcbus; | 526 | *mbus = &bd->srcbus; |
759 | *sbus = &bd->dstbus; | 527 | *sbus = &bd->dstbus; |
528 | } else { | ||
529 | /* bd->srcbus.buswidth == 1 */ | ||
530 | *mbus = &bd->dstbus; | ||
531 | *sbus = &bd->srcbus; | ||
760 | } | 532 | } |
761 | } | 533 | } |
762 | } | 534 | } |
@@ -775,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
775 | llis_va[num_llis].cctl = cctl; | 547 | llis_va[num_llis].cctl = cctl; |
776 | llis_va[num_llis].src = bd->srcbus.addr; | 548 | llis_va[num_llis].src = bd->srcbus.addr; |
777 | llis_va[num_llis].dst = bd->dstbus.addr; | 549 | llis_va[num_llis].dst = bd->dstbus.addr; |
778 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * | 550 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); |
779 | sizeof(struct pl08x_lli); | ||
780 | llis_va[num_llis].lli |= bd->lli_bus; | 551 | llis_va[num_llis].lli |= bd->lli_bus; |
781 | 552 | ||
782 | if (cctl & PL080_CONTROL_SRC_INCR) | 553 | if (cctl & PL080_CONTROL_SRC_INCR) |
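In the hunk above, pl08x_fill_lli_for_desc() points each descriptor's lli word at the bus address of the next entry in the same pool allocation (and ORs in bd->lli_bus to pick the AHB master used for LLI fetches), which is how the controller walks the chain without CPU involvement. A simplified, self-contained sketch of that next-pointer arithmetic, with illustrative names, the AHB-master bit omitted, and the layout modeled on pl08x_lli's src/dst/lli/cctl words:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified descriptor modeled on the driver's four-word pl08x_lli. */
struct lli {
	uint32_t src;
	uint32_t dst;
	uint32_t lli;    /* bus address of the next descriptor; 0 ends the chain */
	uint32_t cctl;
};

/* Link 'num' descriptors of an array whose bus (physical) address is llis_bus. */
static void chain_llis(struct lli *llis_va, uint32_t llis_bus, int num)
{
	for (int i = 0; i < num; i++)
		llis_va[i].lli = (i + 1 < num)
			? llis_bus + (uint32_t)((i + 1) * sizeof(struct lli))
			: 0;    /* last entry: no successor */
}

int main(void)
{
	struct lli descs[3];
	uint32_t bus = 0x40001000u;   /* hypothetical dma_pool bus address */

	chain_llis(descs, bus, 3);
	for (int i = 0; i < 3; i++)
		printf("lli[%d].lli = 0x%08x\n", i, descs[i].lli);
	return 0;
}
```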
@@ -789,12 +560,16 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
789 | bd->remainder -= len; | 560 | bd->remainder -= len; |
790 | } | 561 | } |
791 | 562 | ||
792 | static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, | 563 | /* |
793 | u32 *cctl, u32 len, int num_llis, size_t *total_bytes) | 564 | * Return number of bytes to fill to boundary, or len. |
565 | * This calculation works for any value of addr. | ||
566 | */ | ||
567 | static inline size_t pl08x_pre_boundary(u32 addr, size_t len) | ||
794 | { | 568 | { |
795 | *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); | 569 | size_t boundary_len = PL08X_BOUNDARY_SIZE - |
796 | pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); | 570 | (addr & (PL08X_BOUNDARY_SIZE - 1)); |
797 | (*total_bytes) += len; | 571 | |
572 | return min(boundary_len, len); | ||
798 | } | 573 | } |
799 | 574 | ||
800 | /* | 575 | /* |
@@ -808,20 +583,27 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
808 | struct pl08x_bus_data *mbus, *sbus; | 583 | struct pl08x_bus_data *mbus, *sbus; |
809 | struct pl08x_lli_build_data bd; | 584 | struct pl08x_lli_build_data bd; |
810 | int num_llis = 0; | 585 | int num_llis = 0; |
811 | u32 cctl, early_bytes = 0; | 586 | u32 cctl; |
812 | size_t max_bytes_per_lli, total_bytes; | 587 | size_t max_bytes_per_lli; |
588 | size_t total_bytes = 0; | ||
813 | struct pl08x_lli *llis_va; | 589 | struct pl08x_lli *llis_va; |
814 | struct pl08x_sg *dsg; | ||
815 | 590 | ||
816 | txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); | 591 | txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, |
592 | &txd->llis_bus); | ||
817 | if (!txd->llis_va) { | 593 | if (!txd->llis_va) { |
818 | dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); | 594 | dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); |
819 | return 0; | 595 | return 0; |
820 | } | 596 | } |
821 | 597 | ||
598 | pl08x->pool_ctr++; | ||
599 | |||
600 | /* Get the default CCTL */ | ||
601 | cctl = txd->cctl; | ||
602 | |||
822 | bd.txd = txd; | 603 | bd.txd = txd; |
604 | bd.srcbus.addr = txd->src_addr; | ||
605 | bd.dstbus.addr = txd->dst_addr; | ||
823 | bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; | 606 | bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; |
824 | cctl = txd->cctl; | ||
825 | 607 | ||
826 | /* Find maximum width of the source bus */ | 608 | /* Find maximum width of the source bus */ |
827 | bd.srcbus.maxwidth = | 609 | bd.srcbus.maxwidth = |
@@ -833,179 +615,215 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
833 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> | 615 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> |
834 | PL080_CONTROL_DWIDTH_SHIFT); | 616 | PL080_CONTROL_DWIDTH_SHIFT); |
835 | 617 | ||
836 | list_for_each_entry(dsg, &txd->dsg_list, node) { | 618 | /* Set up the bus widths to the maximum */ |
837 | total_bytes = 0; | 619 | bd.srcbus.buswidth = bd.srcbus.maxwidth; |
838 | cctl = txd->cctl; | 620 | bd.dstbus.buswidth = bd.dstbus.maxwidth; |
839 | 621 | ||
840 | bd.srcbus.addr = dsg->src_addr; | 622 | /* |
841 | bd.dstbus.addr = dsg->dst_addr; | 623 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) |
842 | bd.remainder = dsg->len; | 624 | */ |
843 | bd.srcbus.buswidth = bd.srcbus.maxwidth; | 625 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * |
844 | bd.dstbus.buswidth = bd.dstbus.maxwidth; | 626 | PL080_CONTROL_TRANSFER_SIZE_MASK; |
845 | 627 | ||
846 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | 628 | /* We need to count this down to zero */ |
629 | bd.remainder = txd->len; | ||
847 | 630 | ||
848 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", | 631 | /* |
849 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", | 632 | * Choose bus to align to |
850 | bd.srcbus.buswidth, | 633 | * - prefers destination bus if both available |
851 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", | 634 | * - if fixed address on one bus chooses other |
852 | bd.dstbus.buswidth, | 635 | */ |
853 | bd.remainder); | 636 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); |
854 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", | 637 | |
855 | mbus == &bd.srcbus ? "src" : "dst", | 638 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", |
856 | sbus == &bd.srcbus ? "src" : "dst"); | 639 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", |
640 | bd.srcbus.buswidth, | ||
641 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", | ||
642 | bd.dstbus.buswidth, | ||
643 | bd.remainder, max_bytes_per_lli); | ||
644 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", | ||
645 | mbus == &bd.srcbus ? "src" : "dst", | ||
646 | sbus == &bd.srcbus ? "src" : "dst"); | ||
647 | |||
648 | if (txd->len < mbus->buswidth) { | ||
649 | /* Less than a bus width available - send as single bytes */ | ||
650 | while (bd.remainder) { | ||
651 | dev_vdbg(&pl08x->adev->dev, | ||
652 | "%s single byte LLIs for a transfer of " | ||
653 | "less than a bus width (remain 0x%08x)\n", | ||
654 | __func__, bd.remainder); | ||
655 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
656 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
657 | total_bytes++; | ||
658 | } | ||
659 | } else { | ||
660 | /* Make one byte LLIs until master bus is aligned */ | ||
661 | while ((mbus->addr) % (mbus->buswidth)) { | ||
662 | dev_vdbg(&pl08x->adev->dev, | ||
663 | "%s adjustment lli for less than bus width " | ||
664 | "(remain 0x%08x)\n", | ||
665 | __func__, bd.remainder); | ||
666 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
667 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
668 | total_bytes++; | ||
669 | } | ||
857 | 670 | ||
858 | /* | 671 | /* |
859 | * Zero length is only allowed if all these requirements are | 672 | * Master now aligned |
860 | * met: | 673 | * - if slave is not then we must set its width down |
861 | * - flow controller is peripheral. | ||
862 | * - src.addr is aligned to src.width | ||
863 | * - dst.addr is aligned to dst.width | ||
864 | * | ||
865 | * sg_len == 1 should be true, as there can be two cases here: | ||
866 | * | ||
867 | * - Memory addresses are contiguous and are not scattered. | ||
868 | * Here, Only one sg will be passed by user driver, with | ||
869 | * memory address and zero length. We pass this to controller | ||
870 | * and after the transfer it will receive the last burst | ||
871 | * request from peripheral and so transfer finishes. | ||
872 | * | ||
873 | * - Memory addresses are scattered and are not contiguous. | ||
874 | * Here, Obviously as DMA controller doesn't know when a lli's | ||
875 | * transfer gets over, it can't load next lli. So in this | ||
876 | * case, there has to be an assumption that only one lli is | ||
877 | * supported. Thus, we can't have scattered addresses. | ||
878 | */ | 674 | */ |
879 | if (!bd.remainder) { | 675 | if (sbus->addr % sbus->buswidth) { |
880 | u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> | 676 | dev_dbg(&pl08x->adev->dev, |
881 | PL080_CONFIG_FLOW_CONTROL_SHIFT; | 677 | "%s set down bus width to one byte\n", |
882 | if (!((fc >= PL080_FLOW_SRC2DST_DST) && | 678 | __func__); |
883 | (fc <= PL080_FLOW_SRC2DST_SRC))) { | ||
884 | dev_err(&pl08x->adev->dev, "%s sg len can't be zero", | ||
885 | __func__); | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | if ((bd.srcbus.addr % bd.srcbus.buswidth) || | ||
890 | (bd.dstbus.addr % bd.dstbus.buswidth)) { | ||
891 | dev_err(&pl08x->adev->dev, | ||
892 | "%s src & dst address must be aligned to src" | ||
893 | " & dst width if peripheral is flow controller", | ||
894 | __func__); | ||
895 | return 0; | ||
896 | } | ||
897 | 679 | ||
898 | cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, | 680 | sbus->buswidth = 1; |
899 | bd.dstbus.buswidth, 0); | ||
900 | pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); | ||
901 | break; | ||
902 | } | 681 | } |
903 | 682 | ||
904 | /* | 683 | /* |
905 | * Send byte by byte for following cases | 684 | * Make largest possible LLIs until less than one bus |
906 | * - Less than a bus width available | 685 | * width left |
907 | * - until master bus is aligned | ||
908 | */ | 686 | */ |
909 | if (bd.remainder < mbus->buswidth) | 687 | while (bd.remainder > (mbus->buswidth - 1)) { |
910 | early_bytes = bd.remainder; | 688 | size_t lli_len, target_len, tsize, odd_bytes; |
911 | else if ((mbus->addr) % (mbus->buswidth)) { | ||
912 | early_bytes = mbus->buswidth - (mbus->addr) % | ||
913 | (mbus->buswidth); | ||
914 | if ((bd.remainder - early_bytes) < mbus->buswidth) | ||
915 | early_bytes = bd.remainder; | ||
916 | } | ||
917 | 689 | ||
918 | if (early_bytes) { | ||
919 | dev_vdbg(&pl08x->adev->dev, | ||
920 | "%s byte width LLIs (remain 0x%08x)\n", | ||
921 | __func__, bd.remainder); | ||
922 | prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, | ||
923 | &total_bytes); | ||
924 | } | ||
925 | |||
926 | if (bd.remainder) { | ||
927 | /* | 690 | /* |
928 | * Master now aligned | 691 | * If enough left try to send max possible, |
929 | * - if slave is not then we must set its width down | 692 | * otherwise try to send the remainder |
930 | */ | 693 | */ |
931 | if (sbus->addr % sbus->buswidth) { | 694 | target_len = min(bd.remainder, max_bytes_per_lli); |
932 | dev_dbg(&pl08x->adev->dev, | ||
933 | "%s set down bus width to one byte\n", | ||
934 | __func__); | ||
935 | |||
936 | sbus->buswidth = 1; | ||
937 | } | ||
938 | 695 | ||
939 | /* | 696 | /* |
940 | * Bytes transferred = tsize * src width, not | 697 | * Set bus lengths for incrementing buses to the |
941 | * MIN(buswidths) | 698 | * number of bytes which fill to next memory boundary, |
699 | * limiting on the target length calculated above. | ||
942 | */ | 700 | */ |
943 | max_bytes_per_lli = bd.srcbus.buswidth * | 701 | if (cctl & PL080_CONTROL_SRC_INCR) |
944 | PL080_CONTROL_TRANSFER_SIZE_MASK; | 702 | bd.srcbus.fill_bytes = |
945 | dev_vdbg(&pl08x->adev->dev, | 703 | pl08x_pre_boundary(bd.srcbus.addr, |
946 | "%s max bytes per lli = %zu\n", | 704 | target_len); |
947 | __func__, max_bytes_per_lli); | 705 | else |
948 | 706 | bd.srcbus.fill_bytes = target_len; | |
949 | /* | 707 | |
950 | * Make largest possible LLIs until less than one bus | 708 | if (cctl & PL080_CONTROL_DST_INCR) |
951 | * width left | 709 | bd.dstbus.fill_bytes = |
952 | */ | 710 | pl08x_pre_boundary(bd.dstbus.addr, |
953 | while (bd.remainder > (mbus->buswidth - 1)) { | 711 | target_len); |
954 | size_t lli_len, tsize, width; | 712 | else |
713 | bd.dstbus.fill_bytes = target_len; | ||
714 | |||
715 | /* Find the nearest */ | ||
716 | lli_len = min(bd.srcbus.fill_bytes, | ||
717 | bd.dstbus.fill_bytes); | ||
718 | |||
719 | BUG_ON(lli_len > bd.remainder); | ||
720 | |||
721 | if (lli_len <= 0) { | ||
722 | dev_err(&pl08x->adev->dev, | ||
723 | "%s lli_len is %zu, <= 0\n", | ||
724 | __func__, lli_len); | ||
725 | return 0; | ||
726 | } | ||
955 | 727 | ||
728 | if (lli_len == target_len) { | ||
729 | /* | ||
730 | * Can send what we wanted. | ||
731 | * Maintain alignment | ||
732 | */ | ||
733 | lli_len = (lli_len/mbus->buswidth) * | ||
734 | mbus->buswidth; | ||
735 | odd_bytes = 0; | ||
736 | } else { | ||
956 | /* | 737 | /* |
957 | * If enough left try to send max possible, | 738 | * So now we know how many bytes to transfer |
958 | * otherwise try to send the remainder | 739 | * to get to the nearest boundary. The next |
740 | * LLI will past the boundary. However, we | ||
741 | * may be working to a boundary on the slave | ||
742 | * bus. We need to ensure the master stays | ||
743 | * aligned, and that we are working in | ||
744 | * multiples of the bus widths. | ||
959 | */ | 745 | */ |
960 | lli_len = min(bd.remainder, max_bytes_per_lli); | 746 | odd_bytes = lli_len % mbus->buswidth; |
747 | lli_len -= odd_bytes; | ||
961 | 748 | ||
749 | } | ||
750 | |||
751 | if (lli_len) { | ||
962 | /* | 752 | /* |
963 | * Check against maximum bus alignment: | 753 | * Check against minimum bus alignment: |
964 | * Calculate actual transfer size in relation to | 754 | * Calculate actual transfer size in relation |
965 | * bus width an get a maximum remainder of the | 755 | * to bus width an get a maximum remainder of |
966 | * highest bus width - 1 | 756 | * the smallest bus width - 1 |
967 | */ | 757 | */ |
968 | width = max(mbus->buswidth, sbus->buswidth); | 758 | /* FIXME: use round_down()? */ |
969 | lli_len = (lli_len / width) * width; | 759 | tsize = lli_len / min(mbus->buswidth, |
970 | tsize = lli_len / bd.srcbus.buswidth; | 760 | sbus->buswidth); |
761 | lli_len = tsize * min(mbus->buswidth, | ||
762 | sbus->buswidth); | ||
763 | |||
764 | if (target_len != lli_len) { | ||
765 | dev_vdbg(&pl08x->adev->dev, | ||
766 | "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", | ||
767 | __func__, target_len, lli_len, txd->len); | ||
768 | } | ||
769 | |||
770 | cctl = pl08x_cctl_bits(cctl, | ||
771 | bd.srcbus.buswidth, | ||
772 | bd.dstbus.buswidth, | ||
773 | tsize); | ||
971 | 774 | ||
972 | dev_vdbg(&pl08x->adev->dev, | 775 | dev_vdbg(&pl08x->adev->dev, |
973 | "%s fill lli with single lli chunk of " | 776 | "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", |
974 | "size 0x%08zx (remainder 0x%08zx)\n", | ||
975 | __func__, lli_len, bd.remainder); | 777 | __func__, lli_len, bd.remainder); |
976 | |||
977 | cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, | ||
978 | bd.dstbus.buswidth, tsize); | ||
979 | pl08x_fill_lli_for_desc(&bd, num_llis++, | 778 | pl08x_fill_lli_for_desc(&bd, num_llis++, |
980 | lli_len, cctl); | 779 | lli_len, cctl); |
981 | total_bytes += lli_len; | 780 | total_bytes += lli_len; |
982 | } | 781 | } |
983 | 782 | ||
984 | /* | 783 | |
985 | * Send any odd bytes | 784 | if (odd_bytes) { |
986 | */ | 785 | /* |
987 | if (bd.remainder) { | 786 | * Creep past the boundary, maintaining |
988 | dev_vdbg(&pl08x->adev->dev, | 787 | * master alignment |
989 | "%s align with boundary, send odd bytes (remain %zu)\n", | 788 | */ |
990 | __func__, bd.remainder); | 789 | int j; |
991 | prep_byte_width_lli(&bd, &cctl, bd.remainder, | 790 | for (j = 0; (j < mbus->buswidth) |
992 | num_llis++, &total_bytes); | 791 | && (bd.remainder); j++) { |
792 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
793 | dev_vdbg(&pl08x->adev->dev, | ||
794 | "%s align with boundary, single byte (remain 0x%08zx)\n", | ||
795 | __func__, bd.remainder); | ||
796 | pl08x_fill_lli_for_desc(&bd, | ||
797 | num_llis++, 1, cctl); | ||
798 | total_bytes++; | ||
799 | } | ||
993 | } | 800 | } |
994 | } | 801 | } |
995 | 802 | ||
996 | if (total_bytes != dsg->len) { | 803 | /* |
997 | dev_err(&pl08x->adev->dev, | 804 | * Send any odd bytes |
998 | "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", | 805 | */ |
999 | __func__, total_bytes, dsg->len); | 806 | while (bd.remainder) { |
1000 | return 0; | 807 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); |
808 | dev_vdbg(&pl08x->adev->dev, | ||
809 | "%s align with boundary, single odd byte (remain %zu)\n", | ||
810 | __func__, bd.remainder); | ||
811 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
812 | total_bytes++; | ||
1001 | } | 813 | } |
814 | } | ||
815 | if (total_bytes != txd->len) { | ||
816 | dev_err(&pl08x->adev->dev, | ||
817 | "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", | ||
818 | __func__, total_bytes, txd->len); | ||
819 | return 0; | ||
820 | } | ||
1002 | 821 | ||
1003 | if (num_llis >= MAX_NUM_TSFR_LLIS) { | 822 | if (num_llis >= MAX_NUM_TSFR_LLIS) { |
1004 | dev_err(&pl08x->adev->dev, | 823 | dev_err(&pl08x->adev->dev, |
1005 | "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", | 824 | "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", |
1006 | __func__, (u32) MAX_NUM_TSFR_LLIS); | 825 | __func__, (u32) MAX_NUM_TSFR_LLIS); |
1007 | return 0; | 826 | return 0; |
1008 | } | ||
1009 | } | 827 | } |
1010 | 828 | ||
1011 | llis_va = txd->llis_va; | 829 | llis_va = txd->llis_va; |
@@ -1034,91 +852,147 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
1034 | return num_llis; | 852 | return num_llis; |
1035 | } | 853 | } |
1036 | 854 | ||
855 | /* You should call this with the struct pl08x lock held */ | ||
1037 | static void pl08x_free_txd(struct pl08x_driver_data *pl08x, | 856 | static void pl08x_free_txd(struct pl08x_driver_data *pl08x, |
1038 | struct pl08x_txd *txd) | 857 | struct pl08x_txd *txd) |
1039 | { | 858 | { |
1040 | struct pl08x_sg *dsg, *_dsg; | 859 | /* Free the LLI */ |
860 | dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); | ||
1041 | 861 | ||
1042 | if (txd->llis_va) | 862 | pl08x->pool_ctr--; |
1043 | dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); | ||
1044 | |||
1045 | list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { | ||
1046 | list_del(&dsg->node); | ||
1047 | kfree(dsg); | ||
1048 | } | ||
1049 | 863 | ||
1050 | kfree(txd); | 864 | kfree(txd); |
1051 | } | 865 | } |
1052 | 866 | ||
1053 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | 867 | static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, |
868 | struct pl08x_dma_chan *plchan) | ||
1054 | { | 869 | { |
1055 | struct device *dev = txd->vd.tx.chan->device->dev; | 870 | struct pl08x_txd *txdi = NULL; |
1056 | struct pl08x_sg *dsg; | 871 | struct pl08x_txd *next; |
1057 | 872 | ||
1058 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 873 | if (!list_empty(&plchan->pend_list)) { |
1059 | if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | 874 | list_for_each_entry_safe(txdi, |
1060 | list_for_each_entry(dsg, &txd->dsg_list, node) | 875 | next, &plchan->pend_list, node) { |
1061 | dma_unmap_single(dev, dsg->src_addr, dsg->len, | 876 | list_del(&txdi->node); |
1062 | DMA_TO_DEVICE); | 877 | pl08x_free_txd(pl08x, txdi); |
1063 | else { | ||
1064 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1065 | dma_unmap_page(dev, dsg->src_addr, dsg->len, | ||
1066 | DMA_TO_DEVICE); | ||
1067 | } | 878 | } |
1068 | } | 879 | } |
1069 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1070 | if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1071 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1072 | dma_unmap_single(dev, dsg->dst_addr, dsg->len, | ||
1073 | DMA_FROM_DEVICE); | ||
1074 | else | ||
1075 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1076 | dma_unmap_page(dev, dsg->dst_addr, dsg->len, | ||
1077 | DMA_FROM_DEVICE); | ||
1078 | } | ||
1079 | } | 880 | } |
1080 | 881 | ||
1081 | static void pl08x_desc_free(struct virt_dma_desc *vd) | 882 | /* |
883 | * The DMA ENGINE API | ||
884 | */ | ||
885 | static int pl08x_alloc_chan_resources(struct dma_chan *chan) | ||
1082 | { | 886 | { |
1083 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); | 887 | return 0; |
1084 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); | 888 | } |
1085 | |||
1086 | if (!plchan->slave) | ||
1087 | pl08x_unmap_buffers(txd); | ||
1088 | |||
1089 | if (!txd->done) | ||
1090 | pl08x_release_mux(plchan); | ||
1091 | 889 | ||
1092 | pl08x_free_txd(plchan->host, txd); | 890 | static void pl08x_free_chan_resources(struct dma_chan *chan) |
891 | { | ||
1093 | } | 892 | } |
1094 | 893 | ||
1095 | static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, | 894 | /* |
1096 | struct pl08x_dma_chan *plchan) | 895 | * This should be called with the channel plchan->lock held |
896 | */ | ||
897 | static int prep_phy_channel(struct pl08x_dma_chan *plchan, | ||
898 | struct pl08x_txd *txd) | ||
1097 | { | 899 | { |
1098 | LIST_HEAD(head); | 900 | struct pl08x_driver_data *pl08x = plchan->host; |
1099 | struct pl08x_txd *txd; | 901 | struct pl08x_phy_chan *ch; |
902 | int ret; | ||
903 | |||
904 | /* Check if we already have a channel */ | ||
905 | if (plchan->phychan) | ||
906 | return 0; | ||
907 | |||
908 | ch = pl08x_get_phy_channel(pl08x, plchan); | ||
909 | if (!ch) { | ||
910 | /* No physical channel available, cope with it */ | ||
911 | dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); | ||
912 | return -EBUSY; | ||
913 | } | ||
1100 | 914 | ||
1101 | vchan_get_all_descriptors(&plchan->vc, &head); | 915 | /* |
916 | * OK we have a physical channel: for memcpy() this is all we | ||
917 | * need, but for slaves the physical signals may be muxed! | ||
918 | * Can the platform allow us to use this channel? | ||
919 | */ | ||
920 | if (plchan->slave && | ||
921 | ch->signal < 0 && | ||
922 | pl08x->pd->get_signal) { | ||
923 | ret = pl08x->pd->get_signal(plchan); | ||
924 | if (ret < 0) { | ||
925 | dev_dbg(&pl08x->adev->dev, | ||
926 | "unable to use physical channel %d for transfer on %s due to platform restrictions\n", | ||
927 | ch->id, plchan->name); | ||
928 | /* Release physical channel & return */ | ||
929 | pl08x_put_phy_channel(pl08x, ch); | ||
930 | return -EBUSY; | ||
931 | } | ||
932 | ch->signal = ret; | ||
1102 | 933 | ||
1103 | while (!list_empty(&head)) { | 934 | /* Assign the flow control signal to this channel */ |
1104 | txd = list_first_entry(&head, struct pl08x_txd, vd.node); | 935 | if (txd->direction == DMA_TO_DEVICE) |
1105 | list_del(&txd->vd.node); | 936 | txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; |
1106 | pl08x_desc_free(&txd->vd); | 937 | else if (txd->direction == DMA_FROM_DEVICE) |
938 | txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; | ||
1107 | } | 939 | } |
940 | |||
941 | dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", | ||
942 | ch->id, | ||
943 | ch->signal, | ||
944 | plchan->name); | ||
945 | |||
946 | plchan->phychan_hold++; | ||
947 | plchan->phychan = ch; | ||
948 | |||
949 | return 0; | ||
1108 | } | 950 | } |
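In the pre-rework code above, prep_phy_channel() first claims a free physical channel and only then, for slave channels without a signal, asks the platform for a mux signal via get_signal(); if that is refused, the channel is handed back with pl08x_put_phy_channel() before -EBUSY is returned. The stand-alone C sketch below models only that acquire-then-acquire-with-rollback ordering; every name in it is invented for illustration and none of it is the driver's API.

#include <stdio.h>

/* Hypothetical stand-ins for a physical DMA channel and a mux signal. */
static int channel_busy;          /* 0 = free, 1 = taken */
static int signal_available;      /* flip to 1 to simulate a grantable mux */

static int get_channel(void)  { if (channel_busy) return -1; channel_busy = 1; return 0; }
static void put_channel(void) { channel_busy = 0; }
static int get_signal(void)   { return signal_available ? 7 : -1; } /* 7: made-up signal id */

/* Acquire the channel first, then the signal; undo the first step on failure. */
static int prep_channel(void)
{
	int sig;

	if (get_channel() < 0)
		return -1;		/* no physical channel free */

	sig = get_signal();
	if (sig < 0) {
		put_channel();		/* roll back so others can use it */
		return -1;		/* platform refused the mux */
	}

	printf("got channel with signal %d\n", sig);
	return 0;
}

int main(void)
{
	if (prep_channel() < 0)
		printf("busy, caller may retry later\n");

	signal_available = 1;		/* platform now allows the mux */
	if (prep_channel() == 0)
		printf("second attempt succeeded\n");
	return 0;
}

The ordering matters for the same reason the code comments give: the signal belongs to the physical channel, so it can only be requested after the channel itself has been secured.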
1109 | 951 | ||
1110 | /* | 952 | static void release_phy_channel(struct pl08x_dma_chan *plchan) |
1111 | * The DMA ENGINE API | ||
1112 | */ | ||
1113 | static int pl08x_alloc_chan_resources(struct dma_chan *chan) | ||
1114 | { | 953 | { |
1115 | return 0; | 954 | struct pl08x_driver_data *pl08x = plchan->host; |
955 | |||
956 | if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { | ||
957 | pl08x->pd->put_signal(plchan); | ||
958 | plchan->phychan->signal = -1; | ||
959 | } | ||
960 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
961 | plchan->phychan = NULL; | ||
1116 | } | 962 | } |
1117 | 963 | ||
1118 | static void pl08x_free_chan_resources(struct dma_chan *chan) | 964 | static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) |
1119 | { | 965 | { |
1120 | /* Ensure all queued descriptors are freed */ | 966 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); |
1121 | vchan_free_chan_resources(to_virt_chan(chan)); | 967 | struct pl08x_txd *txd = to_pl08x_txd(tx); |
968 | unsigned long flags; | ||
969 | |||
970 | spin_lock_irqsave(&plchan->lock, flags); | ||
971 | |||
972 | plchan->chan.cookie += 1; | ||
973 | if (plchan->chan.cookie < 0) | ||
974 | plchan->chan.cookie = 1; | ||
975 | tx->cookie = plchan->chan.cookie; | ||
976 | |||
977 | /* Put this onto the pending list */ | ||
978 | list_add_tail(&txd->node, &plchan->pend_list); | ||
979 | |||
980 | /* | ||
981 | * If there was no physical channel available for this memcpy, | ||
982 | * stack the request up and indicate that the channel is waiting | ||
983 | * for a free physical channel. | ||
984 | */ | ||
985 | if (!plchan->slave && !plchan->phychan) { | ||
986 | /* Do this memcpy whenever there is a channel ready */ | ||
987 | plchan->state = PL08X_CHAN_WAITING; | ||
988 | plchan->waiting = txd; | ||
989 | } else { | ||
990 | plchan->phychan_hold--; | ||
991 | } | ||
992 | |||
993 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
994 | |||
995 | return tx->cookie; | ||
1122 | } | 996 | } |
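The old pl08x_tx_submit() above, like the atc_assign_cookie() helper restored later in this patch, follows the usual dmaengine convention for transaction cookies: a per-channel signed counter is incremented for every submitted descriptor, and when it would go non-positive it is reset to 1, since zero and negative values are treated specially by the framework. A tiny stand-alone model of that convention (not the dmaengine API itself):

#include <limits.h>
#include <stdio.h>

typedef int dma_cookie_t;

/*
 * Hand out the next cookie, skipping zero and negative values.  The kernel
 * code simply increments and resets when the result is negative; testing
 * before the increment keeps this example free of signed-overflow issues.
 */
static dma_cookie_t assign_cookie(dma_cookie_t *last)
{
	if (*last >= INT_MAX)
		*last = 1;
	else
		*last += 1;
	return *last;
}

int main(void)
{
	dma_cookie_t chan_cookie = INT_MAX - 2;	/* start close to the wrap point */

	for (int i = 0; i < 5; i++)
		printf("cookie %d\n", assign_cookie(&chan_cookie));
	return 0;
}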
1123 | 997 | ||
1124 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | 998 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( |
@@ -1134,57 +1008,43 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | |||
1134 | * If slaves are relying on interrupts to signal completion this function | 1008 | * If slaves are relying on interrupts to signal completion this function |
1135 | * must not be called with interrupts disabled. | 1009 | * must not be called with interrupts disabled. |
1136 | */ | 1010 | */ |
1137 | static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, | 1011 | static enum dma_status |
1138 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 1012 | pl08x_dma_tx_status(struct dma_chan *chan, |
1013 | dma_cookie_t cookie, | ||
1014 | struct dma_tx_state *txstate) | ||
1139 | { | 1015 | { |
1140 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1016 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1141 | struct virt_dma_desc *vd; | 1017 | dma_cookie_t last_used; |
1142 | unsigned long flags; | 1018 | dma_cookie_t last_complete; |
1143 | enum dma_status ret; | 1019 | enum dma_status ret; |
1144 | size_t bytes = 0; | 1020 | u32 bytesleft = 0; |
1145 | 1021 | ||
1146 | ret = dma_cookie_status(chan, cookie, txstate); | 1022 | last_used = plchan->chan.cookie; |
1147 | if (ret == DMA_SUCCESS) | 1023 | last_complete = plchan->lc; |
1148 | return ret; | ||
1149 | 1024 | ||
1150 | /* | 1025 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
1151 | * There's no point calculating the residue if there's | 1026 | if (ret == DMA_SUCCESS) { |
1152 | * no txstate to store the value. | 1027 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
1153 | */ | ||
1154 | if (!txstate) { | ||
1155 | if (plchan->state == PL08X_CHAN_PAUSED) | ||
1156 | ret = DMA_PAUSED; | ||
1157 | return ret; | 1028 | return ret; |
1158 | } | 1029 | } |
1159 | 1030 | ||
1160 | spin_lock_irqsave(&plchan->vc.lock, flags); | ||
1161 | ret = dma_cookie_status(chan, cookie, txstate); | ||
1162 | if (ret != DMA_SUCCESS) { | ||
1163 | vd = vchan_find_desc(&plchan->vc, cookie); | ||
1164 | if (vd) { | ||
1165 | /* On the issued list, so hasn't been processed yet */ | ||
1166 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); | ||
1167 | struct pl08x_sg *dsg; | ||
1168 | |||
1169 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1170 | bytes += dsg->len; | ||
1171 | } else { | ||
1172 | bytes = pl08x_getbytes_chan(plchan); | ||
1173 | } | ||
1174 | } | ||
1175 | spin_unlock_irqrestore(&plchan->vc.lock, flags); | ||
1176 | |||
1177 | /* | 1031 | /* |
1178 | * This cookie not complete yet | 1032 | * This cookie not complete yet |
1179 | * Get number of bytes left in the active transactions and queue | ||
1180 | */ | 1033 | */ |
1181 | dma_set_residue(txstate, bytes); | 1034 | last_used = plchan->chan.cookie; |
1035 | last_complete = plchan->lc; | ||
1036 | |||
1037 | /* Get number of bytes left in the active transactions and queue */ | ||
1038 | bytesleft = pl08x_getbytes_chan(plchan); | ||
1039 | |||
1040 | dma_set_tx_state(txstate, last_complete, last_used, | ||
1041 | bytesleft); | ||
1182 | 1042 | ||
1183 | if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) | 1043 | if (plchan->state == PL08X_CHAN_PAUSED) |
1184 | ret = DMA_PAUSED; | 1044 | return DMA_PAUSED; |
1185 | 1045 | ||
1186 | /* Whether waiting or running, we're in progress */ | 1046 | /* Whether waiting or running, we're in progress */ |
1187 | return ret; | 1047 | return DMA_IN_PROGRESS; |
1188 | } | 1048 | } |
1189 | 1049 | ||
1190 | /* PrimeCell DMA extension */ | 1050 | /* PrimeCell DMA extension */ |
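The pre-virt-dma tx_status() above answers "is this cookie done?" from two watermarks the channel already tracks: the last cookie handed out (last_used) and the last cookie whose descriptor completed (last_complete); for in-flight work the remaining byte count is then queried from the channel via pl08x_getbytes_chan() and reported as residue. The sketch below models only the watermark test in plain C, and it deliberately ignores cookie wraparound, which the real dmaengine helper has to handle.

#include <stdio.h>

typedef int dma_cookie_t;

enum status { DONE, IN_PROGRESS };

/*
 * Simplified completeness test: a cookie is complete once the channel's
 * "last completed" watermark has reached it.  (No wraparound handling.)
 */
static enum status cookie_status(dma_cookie_t cookie,
				 dma_cookie_t last_complete,
				 dma_cookie_t last_used)
{
	if (cookie > last_used)
		return IN_PROGRESS;	/* never issued - treat as pending */
	return cookie <= last_complete ? DONE : IN_PROGRESS;
}

int main(void)
{
	dma_cookie_t last_complete = 41, last_used = 43;

	for (dma_cookie_t c = 40; c <= 44; c++)
		printf("cookie %d: %s\n", c,
		       cookie_status(c, last_complete, last_used) == DONE ?
		       "complete" : "in progress");
	return 0;
}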
@@ -1280,14 +1140,38 @@ static u32 pl08x_burst(u32 maxburst) | |||
1280 | return burst_sizes[i].reg; | 1140 | return burst_sizes[i].reg; |
1281 | } | 1141 | } |
1282 | 1142 | ||
1283 | static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, | 1143 | static int dma_set_runtime_config(struct dma_chan *chan, |
1284 | enum dma_slave_buswidth addr_width, u32 maxburst) | 1144 | struct dma_slave_config *config) |
1285 | { | 1145 | { |
1286 | u32 width, burst, cctl = 0; | 1146 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1147 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1148 | enum dma_slave_buswidth addr_width; | ||
1149 | u32 width, burst, maxburst; | ||
1150 | u32 cctl = 0; | ||
1151 | |||
1152 | if (!plchan->slave) | ||
1153 | return -EINVAL; | ||
1154 | |||
1155 | /* Transfer direction */ | ||
1156 | plchan->runtime_direction = config->direction; | ||
1157 | if (config->direction == DMA_TO_DEVICE) { | ||
1158 | addr_width = config->dst_addr_width; | ||
1159 | maxburst = config->dst_maxburst; | ||
1160 | } else if (config->direction == DMA_FROM_DEVICE) { | ||
1161 | addr_width = config->src_addr_width; | ||
1162 | maxburst = config->src_maxburst; | ||
1163 | } else { | ||
1164 | dev_err(&pl08x->adev->dev, | ||
1165 | "bad runtime_config: alien transfer direction\n"); | ||
1166 | return -EINVAL; | ||
1167 | } | ||
1287 | 1168 | ||
1288 | width = pl08x_width(addr_width); | 1169 | width = pl08x_width(addr_width); |
1289 | if (width == ~0) | 1170 | if (width == ~0) { |
1290 | return ~0; | 1171 | dev_err(&pl08x->adev->dev, |
1172 | "bad runtime_config: alien address width\n"); | ||
1173 | return -EINVAL; | ||
1174 | } | ||
1291 | 1175 | ||
1292 | cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; | 1176 | cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; |
1293 | cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; | 1177 | cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; |
@@ -1304,23 +1188,26 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, | |||
1304 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; | 1188 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; |
1305 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; | 1189 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; |
1306 | 1190 | ||
1307 | return pl08x_cctl(cctl); | 1191 | if (plchan->runtime_direction == DMA_FROM_DEVICE) { |
1308 | } | 1192 | plchan->src_addr = config->src_addr; |
1309 | 1193 | plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | | |
1310 | static int dma_set_runtime_config(struct dma_chan *chan, | 1194 | pl08x_select_bus(plchan->cd->periph_buses, |
1311 | struct dma_slave_config *config) | 1195 | pl08x->mem_buses); |
1312 | { | 1196 | } else { |
1313 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1197 | plchan->dst_addr = config->dst_addr; |
1314 | 1198 | plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | | |
1315 | if (!plchan->slave) | 1199 | pl08x_select_bus(pl08x->mem_buses, |
1316 | return -EINVAL; | 1200 | plchan->cd->periph_buses); |
1317 | 1201 | } | |
1318 | /* Reject definitely invalid configurations */ | ||
1319 | if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | ||
1320 | config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
1321 | return -EINVAL; | ||
1322 | 1202 | ||
1323 | plchan->cfg = *config; | 1203 | dev_dbg(&pl08x->adev->dev, |
1204 | "configured channel %s (%s) for %s, data width %d, " | ||
1205 | "maxburst %d words, LE, CCTL=0x%08x\n", | ||
1206 | dma_chan_name(chan), plchan->name, | ||
1207 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | ||
1208 | addr_width, | ||
1209 | maxburst, | ||
1210 | cctl); | ||
1324 | 1211 | ||
1325 | return 0; | 1212 | return 0; |
1326 | } | 1213 | } |
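Both versions of the runtime-config path above build the channel control word the same way: an encoded bus-width code and an encoded burst-size code are shifted into fixed source and destination fields of a 32-bit register image. The stand-alone sketch below shows that bit-packing pattern; the field offsets and the width encoding are illustrative assumptions, not a reproduction of the PL080 register layout.

#include <stdio.h>
#include <stdint.h>

/* Illustrative field offsets only - not taken from the real register header. */
#define SRC_WIDTH_SHIFT   18
#define DST_WIDTH_SHIFT   21
#define SRC_BURST_SHIFT   12
#define DST_BURST_SHIFT   15

/* Encode a byte width (1/2/4) as a log2-style code: 0, 1, 2. */
static uint32_t width_code(unsigned int bytes)
{
	switch (bytes) {
	case 1: return 0;
	case 2: return 1;
	case 4: return 2;
	default: return 0;	/* fall back to byte-wide transfers */
	}
}

static uint32_t build_cctl(unsigned int width_bytes, uint32_t burst_code)
{
	uint32_t w = width_code(width_bytes);
	uint32_t cctl = 0;

	cctl |= w << SRC_WIDTH_SHIFT;
	cctl |= w << DST_WIDTH_SHIFT;
	cctl |= burst_code << SRC_BURST_SHIFT;
	cctl |= burst_code << DST_BURST_SHIFT;
	return cctl;
}

int main(void)
{
	printf("cctl = 0x%08x\n", build_cctl(4, 3));
	return 0;
}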
@@ -1334,20 +1221,93 @@ static void pl08x_issue_pending(struct dma_chan *chan) | |||
1334 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1221 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1335 | unsigned long flags; | 1222 | unsigned long flags; |
1336 | 1223 | ||
1337 | spin_lock_irqsave(&plchan->vc.lock, flags); | 1224 | spin_lock_irqsave(&plchan->lock, flags); |
1338 | if (vchan_issue_pending(&plchan->vc)) { | 1225 | /* Something is already active, or we're waiting for a channel... */ |
1339 | if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) | 1226 | if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { |
1340 | pl08x_phy_alloc_and_start(plchan); | 1227 | spin_unlock_irqrestore(&plchan->lock, flags); |
1228 | return; | ||
1229 | } | ||
1230 | |||
1231 | /* Take the first element in the queue and execute it */ | ||
1232 | if (!list_empty(&plchan->pend_list)) { | ||
1233 | struct pl08x_txd *next; | ||
1234 | |||
1235 | next = list_first_entry(&plchan->pend_list, | ||
1236 | struct pl08x_txd, | ||
1237 | node); | ||
1238 | list_del(&next->node); | ||
1239 | plchan->state = PL08X_CHAN_RUNNING; | ||
1240 | |||
1241 | pl08x_start_txd(plchan, next); | ||
1242 | } | ||
1243 | |||
1244 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1245 | } | ||
1246 | |||
1247 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | ||
1248 | struct pl08x_txd *txd) | ||
1249 | { | ||
1250 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1251 | unsigned long flags; | ||
1252 | int num_llis, ret; | ||
1253 | |||
1254 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | ||
1255 | if (!num_llis) { | ||
1256 | kfree(txd); | ||
1257 | return -EINVAL; | ||
1341 | } | 1258 | } |
1342 | spin_unlock_irqrestore(&plchan->vc.lock, flags); | 1259 | |
1260 | spin_lock_irqsave(&plchan->lock, flags); | ||
1261 | |||
1262 | /* | ||
1263 | * See if we already have a physical channel allocated, | ||
1264 | * else this is the time to try to get one. | ||
1265 | */ | ||
1266 | ret = prep_phy_channel(plchan, txd); | ||
1267 | if (ret) { | ||
1268 | /* | ||
1269 | * No physical channel was available. | ||
1270 | * | ||
1271 | * memcpy transfers can be sorted out at submission time. | ||
1272 | * | ||
1273 | * Slave transfers may have been denied due to platform | ||
1274 | * channel muxing restrictions. Since there is no guarantee | ||
1275 | * that this will ever be resolved, and the signal must be | ||
1276 | * acquired AFTER acquiring the physical channel, we will let | ||
1277 | * them be NACK:ed with -EBUSY here. The drivers can retry | ||
1278 | * the prep() call if they are eager on doing this using DMA. | ||
1279 | */ | ||
1280 | if (plchan->slave) { | ||
1281 | pl08x_free_txd_list(pl08x, plchan); | ||
1282 | pl08x_free_txd(pl08x, txd); | ||
1283 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1284 | return -EBUSY; | ||
1285 | } | ||
1286 | } else | ||
1287 | /* | ||
1288 | * Else we're all set, paused and ready to roll, status | ||
1289 | * will switch to PL08X_CHAN_RUNNING when we call | ||
1290 | * issue_pending(). If there is something running on the | ||
1291 | * channel already we don't change its state. | ||
1292 | */ | ||
1293 | if (plchan->state == PL08X_CHAN_IDLE) | ||
1294 | plchan->state = PL08X_CHAN_PAUSED; | ||
1295 | |||
1296 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1297 | |||
1298 | return 0; | ||
1343 | } | 1299 | } |
1344 | 1300 | ||
1345 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) | 1301 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, |
1302 | unsigned long flags) | ||
1346 | { | 1303 | { |
1347 | struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | 1304 | struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); |
1348 | 1305 | ||
1349 | if (txd) { | 1306 | if (txd) { |
1350 | INIT_LIST_HEAD(&txd->dsg_list); | 1307 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); |
1308 | txd->tx.flags = flags; | ||
1309 | txd->tx.tx_submit = pl08x_tx_submit; | ||
1310 | INIT_LIST_HEAD(&txd->node); | ||
1351 | 1311 | ||
1352 | /* Always enable error and terminal interrupts */ | 1312 | /* Always enable error and terminal interrupts */ |
1353 | txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | | 1313 | txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | |
@@ -1366,32 +1326,23 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1366 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1326 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1367 | struct pl08x_driver_data *pl08x = plchan->host; | 1327 | struct pl08x_driver_data *pl08x = plchan->host; |
1368 | struct pl08x_txd *txd; | 1328 | struct pl08x_txd *txd; |
1369 | struct pl08x_sg *dsg; | ||
1370 | int ret; | 1329 | int ret; |
1371 | 1330 | ||
1372 | txd = pl08x_get_txd(plchan); | 1331 | txd = pl08x_get_txd(plchan, flags); |
1373 | if (!txd) { | 1332 | if (!txd) { |
1374 | dev_err(&pl08x->adev->dev, | 1333 | dev_err(&pl08x->adev->dev, |
1375 | "%s no memory for descriptor\n", __func__); | 1334 | "%s no memory for descriptor\n", __func__); |
1376 | return NULL; | 1335 | return NULL; |
1377 | } | 1336 | } |
1378 | 1337 | ||
1379 | dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); | 1338 | txd->direction = DMA_NONE; |
1380 | if (!dsg) { | 1339 | txd->src_addr = src; |
1381 | pl08x_free_txd(pl08x, txd); | 1340 | txd->dst_addr = dest; |
1382 | dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", | 1341 | txd->len = len; |
1383 | __func__); | ||
1384 | return NULL; | ||
1385 | } | ||
1386 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
1387 | |||
1388 | dsg->src_addr = src; | ||
1389 | dsg->dst_addr = dest; | ||
1390 | dsg->len = len; | ||
1391 | 1342 | ||
1392 | /* Set platform data for m2m */ | 1343 | /* Set platform data for m2m */ |
1393 | txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1344 | txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1394 | txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & | 1345 | txd->cctl = pl08x->pd->memcpy_channel.cctl & |
1395 | ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | 1346 | ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); |
1396 | 1347 | ||
1397 | /* Both to be incremented or the code will break */ | 1348 | /* Both to be incremented or the code will break */ |
@@ -1401,132 +1352,75 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1401 | txd->cctl |= pl08x_select_bus(pl08x->mem_buses, | 1352 | txd->cctl |= pl08x_select_bus(pl08x->mem_buses, |
1402 | pl08x->mem_buses); | 1353 | pl08x->mem_buses); |
1403 | 1354 | ||
1404 | ret = pl08x_fill_llis_for_desc(plchan->host, txd); | 1355 | ret = pl08x_prep_channel_resources(plchan, txd); |
1405 | if (!ret) { | 1356 | if (ret) |
1406 | pl08x_free_txd(pl08x, txd); | ||
1407 | return NULL; | 1357 | return NULL; |
1408 | } | ||
1409 | 1358 | ||
1410 | return vchan_tx_prep(&plchan->vc, &txd->vd, flags); | 1359 | return &txd->tx; |
1411 | } | 1360 | } |
1412 | 1361 | ||
1413 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | 1362 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( |
1414 | struct dma_chan *chan, struct scatterlist *sgl, | 1363 | struct dma_chan *chan, struct scatterlist *sgl, |
1415 | unsigned int sg_len, enum dma_transfer_direction direction, | 1364 | unsigned int sg_len, enum dma_data_direction direction, |
1416 | unsigned long flags, void *context) | 1365 | unsigned long flags) |
1417 | { | 1366 | { |
1418 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1367 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1419 | struct pl08x_driver_data *pl08x = plchan->host; | 1368 | struct pl08x_driver_data *pl08x = plchan->host; |
1420 | struct pl08x_txd *txd; | 1369 | struct pl08x_txd *txd; |
1421 | struct pl08x_sg *dsg; | 1370 | int ret; |
1422 | struct scatterlist *sg; | 1371 | |
1423 | enum dma_slave_buswidth addr_width; | 1372 | /* |
1424 | dma_addr_t slave_addr; | 1373 | * Current implementation ASSUMES only one sg |
1425 | int ret, tmp; | 1374 | */ |
1426 | u8 src_buses, dst_buses; | 1375 | if (sg_len != 1) { |
1427 | u32 maxburst, cctl; | 1376 | dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n", |
1377 | __func__); | ||
1378 | BUG(); | ||
1379 | } | ||
1428 | 1380 | ||
1429 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", | 1381 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", |
1430 | __func__, sg_dma_len(sgl), plchan->name); | 1382 | __func__, sgl->length, plchan->name); |
1431 | 1383 | ||
1432 | txd = pl08x_get_txd(plchan); | 1384 | txd = pl08x_get_txd(plchan, flags); |
1433 | if (!txd) { | 1385 | if (!txd) { |
1434 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); | 1386 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); |
1435 | return NULL; | 1387 | return NULL; |
1436 | } | 1388 | } |
1437 | 1389 | ||
1390 | if (direction != plchan->runtime_direction) | ||
1391 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " | ||
1392 | "the direction configured for the PrimeCell\n", | ||
1393 | __func__); | ||
1394 | |||
1438 | /* | 1395 | /* |
1439 | * Set up addresses, the PrimeCell configured address | 1396 | * Set up addresses, the PrimeCell configured address |
1440 | * will take precedence since this may configure the | 1397 | * will take precedence since this may configure the |
1441 | * channel target address dynamically at runtime. | 1398 | * channel target address dynamically at runtime. |
1442 | */ | 1399 | */ |
1443 | if (direction == DMA_MEM_TO_DEV) { | 1400 | txd->direction = direction; |
1444 | cctl = PL080_CONTROL_SRC_INCR; | 1401 | txd->len = sgl->length; |
1445 | slave_addr = plchan->cfg.dst_addr; | 1402 | |
1446 | addr_width = plchan->cfg.dst_addr_width; | 1403 | if (direction == DMA_TO_DEVICE) { |
1447 | maxburst = plchan->cfg.dst_maxburst; | 1404 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1448 | src_buses = pl08x->mem_buses; | 1405 | txd->cctl = plchan->dst_cctl; |
1449 | dst_buses = plchan->cd->periph_buses; | 1406 | txd->src_addr = sgl->dma_address; |
1450 | } else if (direction == DMA_DEV_TO_MEM) { | 1407 | txd->dst_addr = plchan->dst_addr; |
1451 | cctl = PL080_CONTROL_DST_INCR; | 1408 | } else if (direction == DMA_FROM_DEVICE) { |
1452 | slave_addr = plchan->cfg.src_addr; | 1409 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1453 | addr_width = plchan->cfg.src_addr_width; | 1410 | txd->cctl = plchan->src_cctl; |
1454 | maxburst = plchan->cfg.src_maxburst; | 1411 | txd->src_addr = plchan->src_addr; |
1455 | src_buses = plchan->cd->periph_buses; | 1412 | txd->dst_addr = sgl->dma_address; |
1456 | dst_buses = pl08x->mem_buses; | ||
1457 | } else { | 1413 | } else { |
1458 | pl08x_free_txd(pl08x, txd); | ||
1459 | dev_err(&pl08x->adev->dev, | 1414 | dev_err(&pl08x->adev->dev, |
1460 | "%s direction unsupported\n", __func__); | 1415 | "%s direction unsupported\n", __func__); |
1461 | return NULL; | 1416 | return NULL; |
1462 | } | 1417 | } |
1463 | 1418 | ||
1464 | cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); | 1419 | ret = pl08x_prep_channel_resources(plchan, txd); |
1465 | if (cctl == ~0) { | 1420 | if (ret) |
1466 | pl08x_free_txd(pl08x, txd); | ||
1467 | dev_err(&pl08x->adev->dev, | ||
1468 | "DMA slave configuration botched?\n"); | ||
1469 | return NULL; | ||
1470 | } | ||
1471 | |||
1472 | txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); | ||
1473 | |||
1474 | if (plchan->cfg.device_fc) | ||
1475 | tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : | ||
1476 | PL080_FLOW_PER2MEM_PER; | ||
1477 | else | ||
1478 | tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : | ||
1479 | PL080_FLOW_PER2MEM; | ||
1480 | |||
1481 | txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1482 | |||
1483 | ret = pl08x_request_mux(plchan); | ||
1484 | if (ret < 0) { | ||
1485 | pl08x_free_txd(pl08x, txd); | ||
1486 | dev_dbg(&pl08x->adev->dev, | ||
1487 | "unable to mux for transfer on %s due to platform restrictions\n", | ||
1488 | plchan->name); | ||
1489 | return NULL; | ||
1490 | } | ||
1491 | |||
1492 | dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", | ||
1493 | plchan->signal, plchan->name); | ||
1494 | |||
1495 | /* Assign the flow control signal to this channel */ | ||
1496 | if (direction == DMA_MEM_TO_DEV) | ||
1497 | txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; | ||
1498 | else | ||
1499 | txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; | ||
1500 | |||
1501 | for_each_sg(sgl, sg, sg_len, tmp) { | ||
1502 | dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); | ||
1503 | if (!dsg) { | ||
1504 | pl08x_release_mux(plchan); | ||
1505 | pl08x_free_txd(pl08x, txd); | ||
1506 | dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", | ||
1507 | __func__); | ||
1508 | return NULL; | ||
1509 | } | ||
1510 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
1511 | |||
1512 | dsg->len = sg_dma_len(sg); | ||
1513 | if (direction == DMA_MEM_TO_DEV) { | ||
1514 | dsg->src_addr = sg_dma_address(sg); | ||
1515 | dsg->dst_addr = slave_addr; | ||
1516 | } else { | ||
1517 | dsg->src_addr = slave_addr; | ||
1518 | dsg->dst_addr = sg_dma_address(sg); | ||
1519 | } | ||
1520 | } | ||
1521 | |||
1522 | ret = pl08x_fill_llis_for_desc(plchan->host, txd); | ||
1523 | if (!ret) { | ||
1524 | pl08x_release_mux(plchan); | ||
1525 | pl08x_free_txd(pl08x, txd); | ||
1526 | return NULL; | 1421 | return NULL; |
1527 | } | ||
1528 | 1422 | ||
1529 | return vchan_tx_prep(&plchan->vc, &txd->vd, flags); | 1423 | return &txd->tx; |
1530 | } | 1424 | } |
1531 | 1425 | ||
1532 | static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1426 | static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
@@ -1547,9 +1441,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1547 | * Anything succeeds on channels with no physical allocation and | 1441 | * Anything succeeds on channels with no physical allocation and |
1548 | * no queued transfers. | 1442 | * no queued transfers. |
1549 | */ | 1443 | */ |
1550 | spin_lock_irqsave(&plchan->vc.lock, flags); | 1444 | spin_lock_irqsave(&plchan->lock, flags); |
1551 | if (!plchan->phychan && !plchan->at) { | 1445 | if (!plchan->phychan && !plchan->at) { |
1552 | spin_unlock_irqrestore(&plchan->vc.lock, flags); | 1446 | spin_unlock_irqrestore(&plchan->lock, flags); |
1553 | return 0; | 1447 | return 0; |
1554 | } | 1448 | } |
1555 | 1449 | ||
@@ -1558,15 +1452,17 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1558 | plchan->state = PL08X_CHAN_IDLE; | 1452 | plchan->state = PL08X_CHAN_IDLE; |
1559 | 1453 | ||
1560 | if (plchan->phychan) { | 1454 | if (plchan->phychan) { |
1455 | pl08x_terminate_phy_chan(pl08x, plchan->phychan); | ||
1456 | |||
1561 | /* | 1457 | /* |
1562 | * Mark physical channel as free and free any slave | 1458 | * Mark physical channel as free and free any slave |
1563 | * signal | 1459 | * signal |
1564 | */ | 1460 | */ |
1565 | pl08x_phy_free(plchan); | 1461 | release_phy_channel(plchan); |
1566 | } | 1462 | } |
1567 | /* Dequeue jobs and free LLIs */ | 1463 | /* Dequeue jobs and free LLIs */ |
1568 | if (plchan->at) { | 1464 | if (plchan->at) { |
1569 | pl08x_desc_free(&plchan->at->vd); | 1465 | pl08x_free_txd(pl08x, plchan->at); |
1570 | plchan->at = NULL; | 1466 | plchan->at = NULL; |
1571 | } | 1467 | } |
1572 | /* Dequeue jobs not yet fired as well */ | 1468 | /* Dequeue jobs not yet fired as well */ |
@@ -1586,22 +1482,16 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1586 | break; | 1482 | break; |
1587 | } | 1483 | } |
1588 | 1484 | ||
1589 | spin_unlock_irqrestore(&plchan->vc.lock, flags); | 1485 | spin_unlock_irqrestore(&plchan->lock, flags); |
1590 | 1486 | ||
1591 | return ret; | 1487 | return ret; |
1592 | } | 1488 | } |
1593 | 1489 | ||
1594 | bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | 1490 | bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) |
1595 | { | 1491 | { |
1596 | struct pl08x_dma_chan *plchan; | 1492 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1597 | char *name = chan_id; | 1493 | char *name = chan_id; |
1598 | 1494 | ||
1599 | /* Reject channels for devices not bound to this driver */ | ||
1600 | if (chan->device->dev->driver != &pl08x_amba_driver.drv) | ||
1601 | return false; | ||
1602 | |||
1603 | plchan = to_pl08x_chan(chan); | ||
1604 | |||
1605 | /* Check that the channel is not taken! */ | 1495 | /* Check that the channel is not taken! */ |
1606 | if (!strcmp(plchan->name, name)) | 1496 | if (!strcmp(plchan->name, name)) |
1607 | return true; | 1497 | return true; |
@@ -1617,81 +1507,177 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | |||
1617 | */ | 1507 | */ |
1618 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | 1508 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) |
1619 | { | 1509 | { |
1620 | /* The Nomadik variant does not have the config register */ | 1510 | u32 val; |
1621 | if (pl08x->vd->nomadik) | 1511 | |
1622 | return; | 1512 | val = readl(pl08x->base + PL080_CONFIG); |
1623 | writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); | 1513 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); |
1514 | /* We implicitly clear bit 1 and that means little-endian mode */ | ||
1515 | val |= PL080_CONFIG_ENABLE; | ||
1516 | writel(val, pl08x->base + PL080_CONFIG); | ||
1624 | } | 1517 | } |
1625 | 1518 | ||
1626 | static irqreturn_t pl08x_irq(int irq, void *dev) | 1519 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) |
1627 | { | 1520 | { |
1628 | struct pl08x_driver_data *pl08x = dev; | 1521 | struct device *dev = txd->tx.chan->device->dev; |
1629 | u32 mask = 0, err, tc, i; | 1522 | |
1630 | 1523 | if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | |
1631 | /* check & clear - ERR & TC interrupts */ | 1524 | if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
1632 | err = readl(pl08x->base + PL080_ERR_STATUS); | 1525 | dma_unmap_single(dev, txd->src_addr, txd->len, |
1633 | if (err) { | 1526 | DMA_TO_DEVICE); |
1634 | dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", | 1527 | else |
1635 | __func__, err); | 1528 | dma_unmap_page(dev, txd->src_addr, txd->len, |
1636 | writel(err, pl08x->base + PL080_ERR_CLEAR); | 1529 | DMA_TO_DEVICE); |
1530 | } | ||
1531 | if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1532 | if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1533 | dma_unmap_single(dev, txd->dst_addr, txd->len, | ||
1534 | DMA_FROM_DEVICE); | ||
1535 | else | ||
1536 | dma_unmap_page(dev, txd->dst_addr, txd->len, | ||
1537 | DMA_FROM_DEVICE); | ||
1538 | } | ||
1539 | } | ||
1540 | |||
1541 | static void pl08x_tasklet(unsigned long data) | ||
1542 | { | ||
1543 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | ||
1544 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1545 | struct pl08x_txd *txd; | ||
1546 | unsigned long flags; | ||
1547 | |||
1548 | spin_lock_irqsave(&plchan->lock, flags); | ||
1549 | |||
1550 | txd = plchan->at; | ||
1551 | plchan->at = NULL; | ||
1552 | |||
1553 | if (txd) { | ||
1554 | /* Update last completed */ | ||
1555 | plchan->lc = txd->tx.cookie; | ||
1637 | } | 1556 | } |
1638 | tc = readl(pl08x->base + PL080_TC_STATUS); | ||
1639 | if (tc) | ||
1640 | writel(tc, pl08x->base + PL080_TC_CLEAR); | ||
1641 | 1557 | ||
1642 | if (!err && !tc) | 1558 | /* If a new descriptor is queued, set it up plchan->at is NULL here */ |
1643 | return IRQ_NONE; | 1559 | if (!list_empty(&plchan->pend_list)) { |
1560 | struct pl08x_txd *next; | ||
1561 | |||
1562 | next = list_first_entry(&plchan->pend_list, | ||
1563 | struct pl08x_txd, | ||
1564 | node); | ||
1565 | list_del(&next->node); | ||
1566 | |||
1567 | pl08x_start_txd(plchan, next); | ||
1568 | } else if (plchan->phychan_hold) { | ||
1569 | /* | ||
1570 | * This channel is still in use - we have a new txd being | ||
1571 | * prepared and will soon be queued. Don't give up the | ||
1572 | * physical channel. | ||
1573 | */ | ||
1574 | } else { | ||
1575 | struct pl08x_dma_chan *waiting = NULL; | ||
1644 | 1576 | ||
1577 | /* | ||
1578 | * No more jobs, so free up the physical channel | ||
1579 | * Free any allocated signal on slave transfers too | ||
1580 | */ | ||
1581 | release_phy_channel(plchan); | ||
1582 | plchan->state = PL08X_CHAN_IDLE; | ||
1583 | |||
1584 | /* | ||
1585 | * And NOW before anyone else can grab that free:d up | ||
1586 | * physical channel, see if there is some memcpy pending | ||
1587 | * that seriously needs to start because of being stacked | ||
1588 | * up while we were choking the physical channels with data. | ||
1589 | */ | ||
1590 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | ||
1591 | chan.device_node) { | ||
1592 | if (waiting->state == PL08X_CHAN_WAITING && | ||
1593 | waiting->waiting != NULL) { | ||
1594 | int ret; | ||
1595 | |||
1596 | /* This should REALLY not fail now */ | ||
1597 | ret = prep_phy_channel(waiting, | ||
1598 | waiting->waiting); | ||
1599 | BUG_ON(ret); | ||
1600 | waiting->phychan_hold--; | ||
1601 | waiting->state = PL08X_CHAN_RUNNING; | ||
1602 | waiting->waiting = NULL; | ||
1603 | pl08x_issue_pending(&waiting->chan); | ||
1604 | break; | ||
1605 | } | ||
1606 | } | ||
1607 | } | ||
1608 | |||
1609 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1610 | |||
1611 | if (txd) { | ||
1612 | dma_async_tx_callback callback = txd->tx.callback; | ||
1613 | void *callback_param = txd->tx.callback_param; | ||
1614 | |||
1615 | /* Don't try to unmap buffers on slave channels */ | ||
1616 | if (!plchan->slave) | ||
1617 | pl08x_unmap_buffers(txd); | ||
1618 | |||
1619 | /* Free the descriptor */ | ||
1620 | spin_lock_irqsave(&plchan->lock, flags); | ||
1621 | pl08x_free_txd(pl08x, txd); | ||
1622 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1623 | |||
1624 | /* Callback to signal completion */ | ||
1625 | if (callback) | ||
1626 | callback(callback_param); | ||
1627 | } | ||
1628 | } | ||
1629 | |||
1630 | static irqreturn_t pl08x_irq(int irq, void *dev) | ||
1631 | { | ||
1632 | struct pl08x_driver_data *pl08x = dev; | ||
1633 | u32 mask = 0; | ||
1634 | u32 val; | ||
1635 | int i; | ||
1636 | |||
1637 | val = readl(pl08x->base + PL080_ERR_STATUS); | ||
1638 | if (val) { | ||
1639 | /* An error interrupt (on one or more channels) */ | ||
1640 | dev_err(&pl08x->adev->dev, | ||
1641 | "%s error interrupt, register value 0x%08x\n", | ||
1642 | __func__, val); | ||
1643 | /* | ||
1644 | * Simply clear ALL PL08X error interrupts, | ||
1645 | * regardless of channel and cause | ||
1646 | * FIXME: should be 0x00000003 on PL081 really. | ||
1647 | */ | ||
1648 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | ||
1649 | } | ||
1650 | val = readl(pl08x->base + PL080_INT_STATUS); | ||
1645 | for (i = 0; i < pl08x->vd->channels; i++) { | 1651 | for (i = 0; i < pl08x->vd->channels; i++) { |
1646 | if (((1 << i) & err) || ((1 << i) & tc)) { | 1652 | if ((1 << i) & val) { |
1647 | /* Locate physical channel */ | 1653 | /* Locate physical channel */ |
1648 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; | 1654 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; |
1649 | struct pl08x_dma_chan *plchan = phychan->serving; | 1655 | struct pl08x_dma_chan *plchan = phychan->serving; |
1650 | struct pl08x_txd *tx; | ||
1651 | 1656 | ||
1652 | if (!plchan) { | 1657 | /* Schedule tasklet on this channel */ |
1653 | dev_err(&pl08x->adev->dev, | 1658 | tasklet_schedule(&plchan->tasklet); |
1654 | "%s Error TC interrupt on unused channel: 0x%08x\n", | ||
1655 | __func__, i); | ||
1656 | continue; | ||
1657 | } | ||
1658 | |||
1659 | spin_lock(&plchan->vc.lock); | ||
1660 | tx = plchan->at; | ||
1661 | if (tx) { | ||
1662 | plchan->at = NULL; | ||
1663 | /* | ||
1664 | * This descriptor is done, release its mux | ||
1665 | * reservation. | ||
1666 | */ | ||
1667 | pl08x_release_mux(plchan); | ||
1668 | tx->done = true; | ||
1669 | vchan_cookie_complete(&tx->vd); | ||
1670 | |||
1671 | /* | ||
1672 | * And start the next descriptor (if any), | ||
1673 | * otherwise free this channel. | ||
1674 | */ | ||
1675 | if (vchan_next_desc(&plchan->vc)) | ||
1676 | pl08x_start_next_txd(plchan); | ||
1677 | else | ||
1678 | pl08x_phy_free(plchan); | ||
1679 | } | ||
1680 | spin_unlock(&plchan->vc.lock); | ||
1681 | 1659 | ||
1682 | mask |= (1 << i); | 1660 | mask |= (1 << i); |
1683 | } | 1661 | } |
1684 | } | 1662 | } |
1663 | /* Clear only the terminal interrupts on channels we processed */ | ||
1664 | writel(mask, pl08x->base + PL080_TC_CLEAR); | ||
1685 | 1665 | ||
1686 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1666 | return mask ? IRQ_HANDLED : IRQ_NONE; |
1687 | } | 1667 | } |
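Both versions of pl08x_irq() above follow the same shared-IRQ discipline: read the controller's per-channel status bits, service each channel whose bit is set, accumulate a mask of what was actually handled, and claim the interrupt only if that mask is non-empty. A compact stand-alone model of that loop follows; no real registers are touched, the status word is just a variable here.

#include <stdio.h>
#include <stdint.h>

#define NUM_CHANNELS 8

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static void service_channel(int ch)
{
	printf("servicing channel %d\n", ch);
}

/* Model of the "walk the status bits, remember what we serviced" pattern. */
static enum irqreturn fake_irq(uint32_t status)
{
	uint32_t mask = 0;

	for (int i = 0; i < NUM_CHANNELS; i++) {
		if (status & (1u << i)) {
			service_channel(i);
			mask |= 1u << i;
		}
	}
	/* A real handler would now write 'mask' to the interrupt-clear register. */
	return mask ? IRQ_HANDLED : IRQ_NONE;
}

int main(void)
{
	printf("result: %s\n",
	       fake_irq(0x05) == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}

Returning IRQ_NONE when nothing was ours is what lets the interrupt line be shared with other devices without confusing the core IRQ code.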
1688 | 1668 | ||
1689 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | 1669 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) |
1690 | { | 1670 | { |
1671 | u32 cctl = pl08x_cctl(chan->cd->cctl); | ||
1672 | |||
1691 | chan->slave = true; | 1673 | chan->slave = true; |
1692 | chan->name = chan->cd->bus_id; | 1674 | chan->name = chan->cd->bus_id; |
1693 | chan->cfg.src_addr = chan->cd->addr; | 1675 | chan->src_addr = chan->cd->addr; |
1694 | chan->cfg.dst_addr = chan->cd->addr; | 1676 | chan->dst_addr = chan->cd->addr; |
1677 | chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | | ||
1678 | pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); | ||
1679 | chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | | ||
1680 | pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); | ||
1695 | } | 1681 | } |
1696 | 1682 | ||
1697 | /* | 1683 | /* |
@@ -1699,7 +1685,9 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | |||
1699 | * Make a local wrapper to hold required data | 1685 | * Make a local wrapper to hold required data |
1700 | */ | 1686 | */ |
1701 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | 1687 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, |
1702 | struct dma_device *dmadev, unsigned int channels, bool slave) | 1688 | struct dma_device *dmadev, |
1689 | unsigned int channels, | ||
1690 | bool slave) | ||
1703 | { | 1691 | { |
1704 | struct pl08x_dma_chan *chan; | 1692 | struct pl08x_dma_chan *chan; |
1705 | int i; | 1693 | int i; |
@@ -1712,7 +1700,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1712 | * to cope with that situation. | 1700 | * to cope with that situation. |
1713 | */ | 1701 | */ |
1714 | for (i = 0; i < channels; i++) { | 1702 | for (i = 0; i < channels; i++) { |
1715 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | 1703 | chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); |
1716 | if (!chan) { | 1704 | if (!chan) { |
1717 | dev_err(&pl08x->adev->dev, | 1705 | dev_err(&pl08x->adev->dev, |
1718 | "%s no memory for channel\n", __func__); | 1706 | "%s no memory for channel\n", __func__); |
@@ -1721,7 +1709,6 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1721 | 1709 | ||
1722 | chan->host = pl08x; | 1710 | chan->host = pl08x; |
1723 | chan->state = PL08X_CHAN_IDLE; | 1711 | chan->state = PL08X_CHAN_IDLE; |
1724 | chan->signal = -1; | ||
1725 | 1712 | ||
1726 | if (slave) { | 1713 | if (slave) { |
1727 | chan->cd = &pl08x->pd->slave_channels[i]; | 1714 | chan->cd = &pl08x->pd->slave_channels[i]; |
@@ -1734,12 +1721,27 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1734 | return -ENOMEM; | 1721 | return -ENOMEM; |
1735 | } | 1722 | } |
1736 | } | 1723 | } |
1737 | dev_dbg(&pl08x->adev->dev, | 1724 | if (chan->cd->circular_buffer) { |
1725 | dev_err(&pl08x->adev->dev, | ||
1726 | "channel %s: circular buffers not supported\n", | ||
1727 | chan->name); | ||
1728 | kfree(chan); | ||
1729 | continue; | ||
1730 | } | ||
1731 | dev_info(&pl08x->adev->dev, | ||
1738 | "initialize virtual channel \"%s\"\n", | 1732 | "initialize virtual channel \"%s\"\n", |
1739 | chan->name); | 1733 | chan->name); |
1740 | 1734 | ||
1741 | chan->vc.desc_free = pl08x_desc_free; | 1735 | chan->chan.device = dmadev; |
1742 | vchan_init(&chan->vc, dmadev); | 1736 | chan->chan.cookie = 0; |
1737 | chan->lc = 0; | ||
1738 | |||
1739 | spin_lock_init(&chan->lock); | ||
1740 | INIT_LIST_HEAD(&chan->pend_list); | ||
1741 | tasklet_init(&chan->tasklet, pl08x_tasklet, | ||
1742 | (unsigned long) chan); | ||
1743 | |||
1744 | list_add_tail(&chan->chan.device_node, &dmadev->channels); | ||
1743 | } | 1745 | } |
1744 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", | 1746 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", |
1745 | i, slave ? "slave" : "memcpy"); | 1747 | i, slave ? "slave" : "memcpy"); |
@@ -1752,8 +1754,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) | |||
1752 | struct pl08x_dma_chan *next; | 1754 | struct pl08x_dma_chan *next; |
1753 | 1755 | ||
1754 | list_for_each_entry_safe(chan, | 1756 | list_for_each_entry_safe(chan, |
1755 | next, &dmadev->channels, vc.chan.device_node) { | 1757 | next, &dmadev->channels, chan.device_node) { |
1756 | list_del(&chan->vc.chan.device_node); | 1758 | list_del(&chan->chan.device_node); |
1757 | kfree(chan); | 1759 | kfree(chan); |
1758 | } | 1760 | } |
1759 | } | 1761 | } |
@@ -1795,10 +1797,8 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1795 | spin_lock_irqsave(&ch->lock, flags); | 1797 | spin_lock_irqsave(&ch->lock, flags); |
1796 | virt_chan = ch->serving; | 1798 | virt_chan = ch->serving; |
1797 | 1799 | ||
1798 | seq_printf(s, "%d\t\t%s%s\n", | 1800 | seq_printf(s, "%d\t\t%s\n", |
1799 | ch->id, | 1801 | ch->id, virt_chan ? virt_chan->name : "(none)"); |
1800 | virt_chan ? virt_chan->name : "(none)", | ||
1801 | ch->locked ? " LOCKED" : ""); | ||
1802 | 1802 | ||
1803 | spin_unlock_irqrestore(&ch->lock, flags); | 1803 | spin_unlock_irqrestore(&ch->lock, flags); |
1804 | } | 1804 | } |
@@ -1806,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1806 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); | 1806 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); |
1807 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1807 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1808 | seq_printf(s, "--------\t------\n"); | 1808 | seq_printf(s, "--------\t------\n"); |
1809 | list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { | 1809 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { |
1810 | seq_printf(s, "%s\t\t%s\n", chan->name, | 1810 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1811 | pl08x_state_str(chan->state)); | 1811 | pl08x_state_str(chan->state)); |
1812 | } | 1812 | } |
@@ -1814,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1814 | seq_printf(s, "\nPL08x virtual slave channels:\n"); | 1814 | seq_printf(s, "\nPL08x virtual slave channels:\n"); |
1815 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1815 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1816 | seq_printf(s, "--------\t------\n"); | 1816 | seq_printf(s, "--------\t------\n"); |
1817 | list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { | 1817 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { |
1818 | seq_printf(s, "%s\t\t%s\n", chan->name, | 1818 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1819 | pl08x_state_str(chan->state)); | 1819 | pl08x_state_str(chan->state)); |
1820 | } | 1820 | } |
@@ -1837,9 +1837,9 @@ static const struct file_operations pl08x_debugfs_operations = { | |||
1837 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | 1837 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) |
1838 | { | 1838 | { |
1839 | /* Expose a simple debugfs interface to view all clocks */ | 1839 | /* Expose a simple debugfs interface to view all clocks */ |
1840 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), | 1840 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, |
1841 | S_IFREG | S_IRUGO, NULL, pl08x, | 1841 | NULL, pl08x, |
1842 | &pl08x_debugfs_operations); | 1842 | &pl08x_debugfs_operations); |
1843 | } | 1843 | } |
1844 | 1844 | ||
1845 | #else | 1845 | #else |
@@ -1860,7 +1860,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1860 | return ret; | 1860 | return ret; |
1861 | 1861 | ||
1862 | /* Create the driver state holder */ | 1862 | /* Create the driver state holder */ |
1863 | pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); | 1863 | pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); |
1864 | if (!pl08x) { | 1864 | if (!pl08x) { |
1865 | ret = -ENOMEM; | 1865 | ret = -ENOMEM; |
1866 | goto out_no_pl08x; | 1866 | goto out_no_pl08x; |
@@ -1892,7 +1892,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1892 | pl08x->pd = dev_get_platdata(&adev->dev); | 1892 | pl08x->pd = dev_get_platdata(&adev->dev); |
1893 | if (!pl08x->pd) { | 1893 | if (!pl08x->pd) { |
1894 | dev_err(&adev->dev, "no platform data supplied\n"); | 1894 | dev_err(&adev->dev, "no platform data supplied\n"); |
1895 | ret = -EINVAL; | ||
1896 | goto out_no_platdata; | 1895 | goto out_no_platdata; |
1897 | } | 1896 | } |
1898 | 1897 | ||
@@ -1916,6 +1915,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1916 | goto out_no_lli_pool; | 1915 | goto out_no_lli_pool; |
1917 | } | 1916 | } |
1918 | 1917 | ||
1918 | spin_lock_init(&pl08x->lock); | ||
1919 | |||
1919 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); | 1920 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); |
1920 | if (!pl08x->base) { | 1921 | if (!pl08x->base) { |
1921 | ret = -ENOMEM; | 1922 | ret = -ENOMEM; |
@@ -1938,13 +1939,12 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1938 | } | 1939 | } |
1939 | 1940 | ||
1940 | /* Initialize physical channels */ | 1941 | /* Initialize physical channels */ |
1941 | pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), | 1942 | pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), |
1942 | GFP_KERNEL); | 1943 | GFP_KERNEL); |
1943 | if (!pl08x->phy_chans) { | 1944 | if (!pl08x->phy_chans) { |
1944 | dev_err(&adev->dev, "%s failed to allocate " | 1945 | dev_err(&adev->dev, "%s failed to allocate " |
1945 | "physical channel holders\n", | 1946 | "physical channel holders\n", |
1946 | __func__); | 1947 | __func__); |
1947 | ret = -ENOMEM; | ||
1948 | goto out_no_phychans; | 1948 | goto out_no_phychans; |
1949 | } | 1949 | } |
1950 | 1950 | ||
@@ -1954,24 +1954,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1954 | ch->id = i; | 1954 | ch->id = i; |
1955 | ch->base = pl08x->base + PL080_Cx_BASE(i); | 1955 | ch->base = pl08x->base + PL080_Cx_BASE(i); |
1956 | spin_lock_init(&ch->lock); | 1956 | spin_lock_init(&ch->lock); |
1957 | 1957 | ch->serving = NULL; | |
1958 | /* | 1958 | ch->signal = -1; |
1959 | * Nomadik variants can have channels that are locked | 1959 | dev_info(&adev->dev, |
1960 | * down for the secure world only. Lock up these channels | 1960 | "physical channel %d is %s\n", i, |
1961 | * by perpetually serving a dummy virtual channel. | 1961 | pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); |
1962 | */ | ||
1963 | if (vd->nomadik) { | ||
1964 | u32 val; | ||
1965 | |||
1966 | val = readl(ch->base + PL080_CH_CONFIG); | ||
1967 | if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { | ||
1968 | dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); | ||
1969 | ch->locked = true; | ||
1970 | } | ||
1971 | } | ||
1972 | |||
1973 | dev_dbg(&adev->dev, "physical channel %d is %s\n", | ||
1974 | i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); | ||
1975 | } | 1962 | } |
1976 | 1963 | ||
1977 | /* Register as many memcpy channels as there are physical channels */ | 1964 | /* Register as many memcpy channels as there are physical channels */ |
@@ -1987,7 +1974,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1987 | 1974 | ||
1988 | /* Register slave channels */ | 1975 | /* Register slave channels */ |
1989 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, | 1976 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, |
1990 | pl08x->pd->num_slave_channels, true); | 1977 | pl08x->pd->num_slave_channels, |
1978 | true); | ||
1991 | if (ret <= 0) { | 1979 | if (ret <= 0) { |
1992 | dev_warn(&pl08x->adev->dev, | 1980 | dev_warn(&pl08x->adev->dev, |
1993 | "%s failed to enumerate slave channels - %d\n", | 1981 | "%s failed to enumerate slave channels - %d\n", |
@@ -2017,7 +2005,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2017 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", | 2005 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", |
2018 | amba_part(adev), amba_rev(adev), | 2006 | amba_part(adev), amba_rev(adev), |
2019 | (unsigned long long)adev->res.start, adev->irq[0]); | 2007 | (unsigned long long)adev->res.start, adev->irq[0]); |
2020 | |||
2021 | return 0; | 2008 | return 0; |
2022 | 2009 | ||
2023 | out_no_slave_reg: | 2010 | out_no_slave_reg: |
@@ -2048,12 +2035,6 @@ static struct vendor_data vendor_pl080 = { | |||
2048 | .dualmaster = true, | 2035 | .dualmaster = true, |
2049 | }; | 2036 | }; |
2050 | 2037 | ||
2051 | static struct vendor_data vendor_nomadik = { | ||
2052 | .channels = 8, | ||
2053 | .dualmaster = true, | ||
2054 | .nomadik = true, | ||
2055 | }; | ||
2056 | |||
2057 | static struct vendor_data vendor_pl081 = { | 2038 | static struct vendor_data vendor_pl081 = { |
2058 | .channels = 2, | 2039 | .channels = 2, |
2059 | .dualmaster = false, | 2040 | .dualmaster = false, |
@@ -2074,15 +2055,13 @@ static struct amba_id pl08x_ids[] = { | |||
2074 | }, | 2055 | }, |
2075 | /* Nomadik 8815 PL080 variant */ | 2056 | /* Nomadik 8815 PL080 variant */ |
2076 | { | 2057 | { |
2077 | .id = 0x00280080, | 2058 | .id = 0x00280880, |
2078 | .mask = 0x00ffffff, | 2059 | .mask = 0x00ffffff, |
2079 | .data = &vendor_nomadik, | 2060 | .data = &vendor_pl080, |
2080 | }, | 2061 | }, |
2081 | { 0, 0 }, | 2062 | { 0, 0 }, |
2082 | }; | 2063 | }; |
2083 | 2064 | ||
2084 | MODULE_DEVICE_TABLE(amba, pl08x_ids); | ||
2085 | |||
2086 | static struct amba_driver pl08x_amba_driver = { | 2065 | static struct amba_driver pl08x_amba_driver = { |
2087 | .drv.name = DRIVER_NAME, | 2066 | .drv.name = DRIVER_NAME, |
2088 | .id_table = pl08x_ids, | 2067 | .id_table = pl08x_ids, |
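The amba_id hunk just above swaps the dedicated Nomadik entry for the generic PL080 vendor data, but each table entry is still consumed the same way: the bus code masks the peripheral ID read from the device and compares the result with the entry's id to pick the matching vendor data. The stand-alone sketch below models that masked-table lookup in plain C; the entries are for illustration and the match helper is not the AMBA bus code itself.

#include <stdio.h>
#include <stdint.h>

struct id_entry {
	uint32_t id;
	uint32_t mask;
	const char *data;	/* would point at per-variant vendor data */
};

static const struct id_entry ids[] = {
	{ 0x00041080, 0x000fffff, "pl080" },
	{ 0x00041081, 0x000fffff, "pl081" },
	{ 0, 0, NULL },		/* terminator */
};

static const char *match_id(uint32_t periphid)
{
	for (const struct id_entry *e = ids; e->mask; e++)
		if ((periphid & e->mask) == e->id)
			return e->data;
	return NULL;
}

int main(void)
{
	uint32_t periphid = 0x12341081;	/* upper bits are masked off by the table */
	const char *hit = match_id(periphid);

	printf("match: %s\n", hit ? hit : "none");
	return 0;
}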
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 13a02f4425b..6a483eac7b3 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -9,9 +9,10 @@ | |||
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * | 11 | * |
12 | * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. | 12 | * This supports the Atmel AHB DMA Controller, |
13 | * The only Atmel DMA Controller that is not covered by this driver is the one | 13 | * |
14 | * found on AT91SAM9263. | 14 | * The driver has currently been tested with the Atmel AT91SAM9RL |
15 | * and AT91SAM9G45 series. | ||
15 | */ | 16 | */ |
16 | 17 | ||
17 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
@@ -22,11 +23,8 @@ | |||
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
25 | #include <linux/of.h> | ||
26 | #include <linux/of_device.h> | ||
27 | 26 | ||
28 | #include "at_hdmac_regs.h" | 27 | #include "at_hdmac_regs.h" |
29 | #include "dmaengine.h" | ||
30 | 28 | ||
31 | /* | 29 | /* |
32 | * Glossary | 30 | * Glossary |
@@ -38,6 +36,7 @@ | |||
38 | */ | 36 | */ |
39 | 37 | ||
40 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) | 38 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) |
39 | #define ATC_DEFAULT_CTRLA (0) | ||
41 | #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ | 40 | #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ |
42 | |ATC_DIF(AT_DMA_MEM_IF)) | 41 | |ATC_DIF(AT_DMA_MEM_IF)) |
43 | 42 | ||
@@ -108,11 +107,10 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) | |||
108 | { | 107 | { |
109 | struct at_desc *desc, *_desc; | 108 | struct at_desc *desc, *_desc; |
110 | struct at_desc *ret = NULL; | 109 | struct at_desc *ret = NULL; |
111 | unsigned long flags; | ||
112 | unsigned int i = 0; | 110 | unsigned int i = 0; |
113 | LIST_HEAD(tmp_list); | 111 | LIST_HEAD(tmp_list); |
114 | 112 | ||
115 | spin_lock_irqsave(&atchan->lock, flags); | 113 | spin_lock_bh(&atchan->lock); |
116 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { | 114 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { |
117 | i++; | 115 | i++; |
118 | if (async_tx_test_ack(&desc->txd)) { | 116 | if (async_tx_test_ack(&desc->txd)) { |
@@ -123,7 +121,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) | |||
123 | dev_dbg(chan2dev(&atchan->chan_common), | 121 | dev_dbg(chan2dev(&atchan->chan_common), |
124 | "desc %p not ACKed\n", desc); | 122 | "desc %p not ACKed\n", desc); |
125 | } | 123 | } |
126 | spin_unlock_irqrestore(&atchan->lock, flags); | 124 | spin_unlock_bh(&atchan->lock); |
127 | dev_vdbg(chan2dev(&atchan->chan_common), | 125 | dev_vdbg(chan2dev(&atchan->chan_common), |
128 | "scanned %u descriptors on freelist\n", i); | 126 | "scanned %u descriptors on freelist\n", i); |
129 | 127 | ||
@@ -131,9 +129,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) | |||
131 | if (!ret) { | 129 | if (!ret) { |
132 | ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); | 130 | ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); |
133 | if (ret) { | 131 | if (ret) { |
134 | spin_lock_irqsave(&atchan->lock, flags); | 132 | spin_lock_bh(&atchan->lock); |
135 | atchan->descs_allocated++; | 133 | atchan->descs_allocated++; |
136 | spin_unlock_irqrestore(&atchan->lock, flags); | 134 | spin_unlock_bh(&atchan->lock); |
137 | } else { | 135 | } else { |
138 | dev_err(chan2dev(&atchan->chan_common), | 136 | dev_err(chan2dev(&atchan->chan_common), |
139 | "not enough descriptors available\n"); | 137 | "not enough descriptors available\n"); |
@@ -152,9 +150,8 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) | |||
152 | { | 150 | { |
153 | if (desc) { | 151 | if (desc) { |
154 | struct at_desc *child; | 152 | struct at_desc *child; |
155 | unsigned long flags; | ||
156 | 153 | ||
157 | spin_lock_irqsave(&atchan->lock, flags); | 154 | spin_lock_bh(&atchan->lock); |
158 | list_for_each_entry(child, &desc->tx_list, desc_node) | 155 | list_for_each_entry(child, &desc->tx_list, desc_node) |
159 | dev_vdbg(chan2dev(&atchan->chan_common), | 156 | dev_vdbg(chan2dev(&atchan->chan_common), |
160 | "moving child desc %p to freelist\n", | 157 | "moving child desc %p to freelist\n", |
@@ -163,14 +160,14 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) | |||
163 | dev_vdbg(chan2dev(&atchan->chan_common), | 160 | dev_vdbg(chan2dev(&atchan->chan_common), |
164 | "moving desc %p to freelist\n", desc); | 161 | "moving desc %p to freelist\n", desc); |
165 | list_add(&desc->desc_node, &atchan->free_list); | 162 | list_add(&desc->desc_node, &atchan->free_list); |
166 | spin_unlock_irqrestore(&atchan->lock, flags); | 163 | spin_unlock_bh(&atchan->lock); |
167 | } | 164 | } |
168 | } | 165 | } |
169 | 166 | ||
170 | /** | 167 | /** |
171 | * atc_desc_chain - build chain adding a descriptor | 168 | * atc_desc_chain - build chain adding a descripor |
172 | * @first: address of first descriptor of the chain | 169 | * @first: address of first descripor of the chain |
173 | * @prev: address of previous descriptor of the chain | 170 | * @prev: address of previous descripor of the chain |
174 | * @desc: descriptor to queue | 171 | * @desc: descriptor to queue |
175 | * | 172 | * |
176 | * Called from prep_* functions | 173 | * Called from prep_* functions |
@@ -191,6 +188,27 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, | |||
191 | } | 188 | } |
192 | 189 | ||
193 | /** | 190 | /** |
191 | * atc_assign_cookie - compute and assign new cookie | ||
192 | * @atchan: channel we work on | ||
193 | * @desc: descriptor to assign cookie for | ||
194 | * | ||
195 | * Called with atchan->lock held and bh disabled | ||
196 | */ | ||
197 | static dma_cookie_t | ||
198 | atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc) | ||
199 | { | ||
200 | dma_cookie_t cookie = atchan->chan_common.cookie; | ||
201 | |||
202 | if (++cookie < 0) | ||
203 | cookie = 1; | ||
204 | |||
205 | atchan->chan_common.cookie = cookie; | ||
206 | desc->txd.cookie = cookie; | ||
207 | |||
208 | return cookie; | ||
209 | } | ||
210 | |||
211 | /** | ||
194 | * atc_dostart - starts the DMA engine for real | 212 | * atc_dostart - starts the DMA engine for real |
195 | * @atchan: the channel we want to start | 213 | * @atchan: the channel we want to start |
196 | * @first: first descriptor in the list we want to begin with | 214 | * @first: first descriptor in the list we want to begin with |
@@ -219,6 +237,10 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
219 | 237 | ||
220 | vdbg_dump_regs(atchan); | 238 | vdbg_dump_regs(atchan); |
221 | 239 | ||
240 | /* clear any pending interrupt */ | ||
241 | while (dma_readl(atdma, EBCISR)) | ||
242 | cpu_relax(); | ||
243 | |||
222 | channel_writel(atchan, SADDR, 0); | 244 | channel_writel(atchan, SADDR, 0); |
223 | channel_writel(atchan, DADDR, 0); | 245 | channel_writel(atchan, DADDR, 0); |
224 | channel_writel(atchan, CTRLA, 0); | 246 | channel_writel(atchan, CTRLA, 0); |
@@ -243,9 +265,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
243 | dev_vdbg(chan2dev(&atchan->chan_common), | 265 | dev_vdbg(chan2dev(&atchan->chan_common), |
244 | "descriptor %u complete\n", txd->cookie); | 266 | "descriptor %u complete\n", txd->cookie); |
245 | 267 | ||
246 | /* mark the descriptor as complete for non cyclic cases only */ | 268 | atchan->completed_cookie = txd->cookie; |
247 | if (!atc_chan_is_cyclic(atchan)) | ||
248 | dma_cookie_complete(txd); | ||
249 | 269 | ||
250 | /* move children to free_list */ | 270 | /* move children to free_list */ |
251 | list_splice_init(&desc->tx_list, &atchan->free_list); | 271 | list_splice_init(&desc->tx_list, &atchan->free_list); |
@@ -279,7 +299,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
279 | 299 | ||
280 | /* for cyclic transfers, | 300 | /* for cyclic transfers, |
281 | * no need to replay callback function while stopping */ | 301 | * no need to replay callback function while stopping */ |
282 | if (!atc_chan_is_cyclic(atchan)) { | 302 | if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { |
283 | dma_async_tx_callback callback = txd->callback; | 303 | dma_async_tx_callback callback = txd->callback; |
284 | void *param = txd->callback_param; | 304 | void *param = txd->callback_param; |
285 | 305 | ||
@@ -451,17 +471,16 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan) | |||
451 | static void atc_tasklet(unsigned long data) | 471 | static void atc_tasklet(unsigned long data) |
452 | { | 472 | { |
453 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; | 473 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; |
454 | unsigned long flags; | ||
455 | 474 | ||
456 | spin_lock_irqsave(&atchan->lock, flags); | 475 | spin_lock(&atchan->lock); |
457 | if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) | 476 | if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) |
458 | atc_handle_error(atchan); | 477 | atc_handle_error(atchan); |
459 | else if (atc_chan_is_cyclic(atchan)) | 478 | else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) |
460 | atc_handle_cyclic(atchan); | 479 | atc_handle_cyclic(atchan); |
461 | else | 480 | else |
462 | atc_advance_work(atchan); | 481 | atc_advance_work(atchan); |
463 | 482 | ||
464 | spin_unlock_irqrestore(&atchan->lock, flags); | 483 | spin_unlock(&atchan->lock); |
465 | } | 484 | } |
466 | 485 | ||
467 | static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | 486 | static irqreturn_t at_dma_interrupt(int irq, void *dev_id) |
@@ -520,10 +539,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
520 | struct at_desc *desc = txd_to_at_desc(tx); | 539 | struct at_desc *desc = txd_to_at_desc(tx); |
521 | struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); | 540 | struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); |
522 | dma_cookie_t cookie; | 541 | dma_cookie_t cookie; |
523 | unsigned long flags; | ||
524 | 542 | ||
525 | spin_lock_irqsave(&atchan->lock, flags); | 543 | spin_lock_bh(&atchan->lock); |
526 | cookie = dma_cookie_assign(tx); | 544 | cookie = atc_assign_cookie(atchan, desc); |
527 | 545 | ||
528 | if (list_empty(&atchan->active_list)) { | 546 | if (list_empty(&atchan->active_list)) { |
529 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 547 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
@@ -536,7 +554,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
536 | list_add_tail(&desc->desc_node, &atchan->queue); | 554 | list_add_tail(&desc->desc_node, &atchan->queue); |
537 | } | 555 | } |
538 | 556 | ||
539 | spin_unlock_irqrestore(&atchan->lock, flags); | 557 | spin_unlock_bh(&atchan->lock); |
540 | 558 | ||
541 | return cookie; | 559 | return cookie; |
542 | } | 560 | } |
@@ -572,6 +590,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
572 | return NULL; | 590 | return NULL; |
573 | } | 591 | } |
574 | 592 | ||
593 | ctrla = ATC_DEFAULT_CTRLA; | ||
575 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | 594 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
576 | | ATC_SRC_ADDR_MODE_INCR | 595 | | ATC_SRC_ADDR_MODE_INCR |
577 | | ATC_DST_ADDR_MODE_INCR | 596 | | ATC_DST_ADDR_MODE_INCR |
@@ -582,13 +601,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
582 | * of the most common optimization. | 601 | * of the most common optimization. |
583 | */ | 602 | */ |
584 | if (!((src | dest | len) & 3)) { | 603 | if (!((src | dest | len) & 3)) { |
585 | ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; | 604 | ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; |
586 | src_width = dst_width = 2; | 605 | src_width = dst_width = 2; |
587 | } else if (!((src | dest | len) & 1)) { | 606 | } else if (!((src | dest | len) & 1)) { |
588 | ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; | 607 | ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; |
589 | src_width = dst_width = 1; | 608 | src_width = dst_width = 1; |
590 | } else { | 609 | } else { |
591 | ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; | 610 | ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; |
592 | src_width = dst_width = 0; | 611 | src_width = dst_width = 0; |
593 | } | 612 | } |
594 | 613 | ||
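Both sides of the hunk above pick the transfer width from the common alignment of source, destination and length; only the way ctrla is seeded differs. The alignment test in isolation, with a hypothetical helper name:

#include <linux/types.h>

/* Hypothetical helper mirroring the (src | dest | len) & mask tests above:
 * returns the width encoding fed to ATC_SRC_WIDTH()/ATC_DST_WIDTH()
 * (2 = 32-bit word, 1 = halfword, 0 = byte). */
static unsigned int widest_common_width(dma_addr_t src, dma_addr_t dest,
                                        size_t len)
{
        if (!((src | dest | len) & 3))
                return 2;       /* everything is word aligned */
        if (!((src | dest | len) & 1))
                return 1;       /* everything is halfword aligned */
        return 0;               /* fall back to byte transfers */
}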
@@ -634,16 +653,14 @@ err_desc_get: | |||
634 | * @sg_len: number of entries in @scatterlist | 653 | * @sg_len: number of entries in @scatterlist |
635 | * @direction: DMA direction | 654 | * @direction: DMA direction |
636 | * @flags: tx descriptor status flags | 655 | * @flags: tx descriptor status flags |
637 | * @context: transaction context (ignored) | ||
638 | */ | 656 | */ |
639 | static struct dma_async_tx_descriptor * | 657 | static struct dma_async_tx_descriptor * |
640 | atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 658 | atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
641 | unsigned int sg_len, enum dma_transfer_direction direction, | 659 | unsigned int sg_len, enum dma_data_direction direction, |
642 | unsigned long flags, void *context) | 660 | unsigned long flags) |
643 | { | 661 | { |
644 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 662 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
645 | struct at_dma_slave *atslave = chan->private; | 663 | struct at_dma_slave *atslave = chan->private; |
646 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; | ||
647 | struct at_desc *first = NULL; | 664 | struct at_desc *first = NULL; |
648 | struct at_desc *prev = NULL; | 665 | struct at_desc *prev = NULL; |
649 | u32 ctrla; | 666 | u32 ctrla; |
@@ -657,27 +674,27 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
657 | 674 | ||
658 | dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", | 675 | dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", |
659 | sg_len, | 676 | sg_len, |
660 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", | 677 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", |
661 | flags); | 678 | flags); |
662 | 679 | ||
663 | if (unlikely(!atslave || !sg_len)) { | 680 | if (unlikely(!atslave || !sg_len)) { |
664 | dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); | 681 | dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n"); |
665 | return NULL; | 682 | return NULL; |
666 | } | 683 | } |
667 | 684 | ||
668 | ctrla = ATC_SCSIZE(sconfig->src_maxburst) | 685 | reg_width = atslave->reg_width; |
669 | | ATC_DCSIZE(sconfig->dst_maxburst); | 686 | |
687 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; | ||
670 | ctrlb = ATC_IEN; | 688 | ctrlb = ATC_IEN; |
671 | 689 | ||
672 | switch (direction) { | 690 | switch (direction) { |
673 | case DMA_MEM_TO_DEV: | 691 | case DMA_TO_DEVICE: |
674 | reg_width = convert_buswidth(sconfig->dst_addr_width); | ||
675 | ctrla |= ATC_DST_WIDTH(reg_width); | 692 | ctrla |= ATC_DST_WIDTH(reg_width); |
676 | ctrlb |= ATC_DST_ADDR_MODE_FIXED | 693 | ctrlb |= ATC_DST_ADDR_MODE_FIXED |
677 | | ATC_SRC_ADDR_MODE_INCR | 694 | | ATC_SRC_ADDR_MODE_INCR |
678 | | ATC_FC_MEM2PER | 695 | | ATC_FC_MEM2PER |
679 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); | 696 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); |
680 | reg = sconfig->dst_addr; | 697 | reg = atslave->tx_reg; |
681 | for_each_sg(sgl, sg, sg_len, i) { | 698 | for_each_sg(sgl, sg, sg_len, i) { |
682 | struct at_desc *desc; | 699 | struct at_desc *desc; |
683 | u32 len; | 700 | u32 len; |
@@ -689,11 +706,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
689 | 706 | ||
690 | mem = sg_dma_address(sg); | 707 | mem = sg_dma_address(sg); |
691 | len = sg_dma_len(sg); | 708 | len = sg_dma_len(sg); |
692 | if (unlikely(!len)) { | ||
693 | dev_dbg(chan2dev(chan), | ||
694 | "prep_slave_sg: sg(%d) data length is zero\n", i); | ||
695 | goto err; | ||
696 | } | ||
697 | mem_width = 2; | 709 | mem_width = 2; |
698 | if (unlikely(mem & 3 || len & 3)) | 710 | if (unlikely(mem & 3 || len & 3)) |
699 | mem_width = 0; | 711 | mem_width = 0; |
@@ -709,15 +721,14 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
709 | total_len += len; | 721 | total_len += len; |
710 | } | 722 | } |
711 | break; | 723 | break; |
712 | case DMA_DEV_TO_MEM: | 724 | case DMA_FROM_DEVICE: |
713 | reg_width = convert_buswidth(sconfig->src_addr_width); | ||
714 | ctrla |= ATC_SRC_WIDTH(reg_width); | 725 | ctrla |= ATC_SRC_WIDTH(reg_width); |
715 | ctrlb |= ATC_DST_ADDR_MODE_INCR | 726 | ctrlb |= ATC_DST_ADDR_MODE_INCR |
716 | | ATC_SRC_ADDR_MODE_FIXED | 727 | | ATC_SRC_ADDR_MODE_FIXED |
717 | | ATC_FC_PER2MEM | 728 | | ATC_FC_PER2MEM |
718 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); | 729 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); |
719 | 730 | ||
720 | reg = sconfig->src_addr; | 731 | reg = atslave->rx_reg; |
721 | for_each_sg(sgl, sg, sg_len, i) { | 732 | for_each_sg(sgl, sg, sg_len, i) { |
722 | struct at_desc *desc; | 733 | struct at_desc *desc; |
723 | u32 len; | 734 | u32 len; |
@@ -729,11 +740,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
729 | 740 | ||
730 | mem = sg_dma_address(sg); | 741 | mem = sg_dma_address(sg); |
731 | len = sg_dma_len(sg); | 742 | len = sg_dma_len(sg); |
732 | if (unlikely(!len)) { | ||
733 | dev_dbg(chan2dev(chan), | ||
734 | "prep_slave_sg: sg(%d) data length is zero\n", i); | ||
735 | goto err; | ||
736 | } | ||
737 | mem_width = 2; | 743 | mem_width = 2; |
738 | if (unlikely(mem & 3 || len & 3)) | 744 | if (unlikely(mem & 3 || len & 3)) |
739 | mem_width = 0; | 745 | mem_width = 0; |
@@ -767,7 +773,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
767 | 773 | ||
768 | err_desc_get: | 774 | err_desc_get: |
769 | dev_err(chan2dev(chan), "not enough descriptors available\n"); | 775 | dev_err(chan2dev(chan), "not enough descriptors available\n"); |
770 | err: | ||
771 | atc_desc_put(atchan, first); | 776 | atc_desc_put(atchan, first); |
772 | return NULL; | 777 | return NULL; |
773 | } | 778 | } |
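Whichever side supplies the peripheral address and width, the body of atc_prep_slave_sg() is a scatterlist walk that turns each entry into one hardware descriptor. A generic sketch of that walk (names are illustrative, not the driver's; the per-entry zero-length check appears only on the left-hand side of the hunks above):

#include <linux/errno.h>
#include <linux/scatterlist.h>

/* One descriptor per scatterlist entry, each programmed with the entry's
 * bus address and length. */
static int walk_slave_sg_sketch(struct scatterlist *sgl, unsigned int sg_len)
{
        struct scatterlist *sg;
        unsigned int i;

        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t mem = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                if (!len)
                        return -EINVAL; /* reject empty entries up front */

                /* a real driver allocates and fills a descriptor here,
                 * using mem, len and the peripheral register address */
                (void)mem;
        }

        return 0;
}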
@@ -778,7 +783,7 @@ err: | |||
778 | */ | 783 | */ |
779 | static int | 784 | static int |
780 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | 785 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, |
781 | size_t period_len, enum dma_transfer_direction direction) | 786 | size_t period_len, enum dma_data_direction direction) |
782 | { | 787 | { |
783 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) | 788 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) |
784 | goto err_out; | 789 | goto err_out; |
@@ -786,7 +791,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | |||
786 | goto err_out; | 791 | goto err_out; |
787 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 792 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
788 | goto err_out; | 793 | goto err_out; |
789 | if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)))) | 794 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) |
790 | goto err_out; | 795 | goto err_out; |
791 | 796 | ||
792 | return 0; | 797 | return 0; |
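The first check above bounds one period by what a single descriptor's BTSIZE field can describe: ATC_BTSIZE_MAX transfers of (1 << reg_width) bytes each (ATC_BTSIZE_MAX is 0xFFFF, see at_hdmac_regs.h further down). A worked illustration of that limit:

#include <linux/types.h>

#define ATC_BTSIZE_MAX  0xFFFFUL        /* copied from at_hdmac_regs.h below */

/* Maximum period length for a given register width encoding
 * (0 = byte, 1 = halfword, 2 = word). For 32-bit accesses this is
 * 0xFFFF * 4 = 262140 bytes, i.e. just under 256 KiB per period. */
static size_t max_period_len(unsigned int reg_width)
{
        return (size_t)ATC_BTSIZE_MAX << reg_width;
}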
@@ -796,29 +801,26 @@ err_out: | |||
796 | } | 801 | } |
797 | 802 | ||
798 | /** | 803 | /** |
799 | * atc_dma_cyclic_fill_desc - Fill one period descriptor | 804 | * atc_dma_cyclic_fill_desc - Fill one period descriptor |
800 | */ | 805 | */ |
801 | static int | 806 | static int |
802 | atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | 807 | atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, |
803 | unsigned int period_index, dma_addr_t buf_addr, | 808 | unsigned int period_index, dma_addr_t buf_addr, |
804 | unsigned int reg_width, size_t period_len, | 809 | size_t period_len, enum dma_data_direction direction) |
805 | enum dma_transfer_direction direction) | ||
806 | { | 810 | { |
807 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 811 | u32 ctrla; |
808 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; | 812 | unsigned int reg_width = atslave->reg_width; |
809 | u32 ctrla; | ||
810 | 813 | ||
811 | /* prepare common CRTLA value */ | 814 | /* prepare common CRTLA value */ |
812 | ctrla = ATC_SCSIZE(sconfig->src_maxburst) | 815 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla |
813 | | ATC_DCSIZE(sconfig->dst_maxburst) | ||
814 | | ATC_DST_WIDTH(reg_width) | 816 | | ATC_DST_WIDTH(reg_width) |
815 | | ATC_SRC_WIDTH(reg_width) | 817 | | ATC_SRC_WIDTH(reg_width) |
816 | | period_len >> reg_width; | 818 | | period_len >> reg_width; |
817 | 819 | ||
818 | switch (direction) { | 820 | switch (direction) { |
819 | case DMA_MEM_TO_DEV: | 821 | case DMA_TO_DEVICE: |
820 | desc->lli.saddr = buf_addr + (period_len * period_index); | 822 | desc->lli.saddr = buf_addr + (period_len * period_index); |
821 | desc->lli.daddr = sconfig->dst_addr; | 823 | desc->lli.daddr = atslave->tx_reg; |
822 | desc->lli.ctrla = ctrla; | 824 | desc->lli.ctrla = ctrla; |
823 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | 825 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED |
824 | | ATC_SRC_ADDR_MODE_INCR | 826 | | ATC_SRC_ADDR_MODE_INCR |
@@ -827,8 +829,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
827 | | ATC_DIF(AT_DMA_PER_IF); | 829 | | ATC_DIF(AT_DMA_PER_IF); |
828 | break; | 830 | break; |
829 | 831 | ||
830 | case DMA_DEV_TO_MEM: | 832 | case DMA_FROM_DEVICE: |
831 | desc->lli.saddr = sconfig->src_addr; | 833 | desc->lli.saddr = atslave->rx_reg; |
832 | desc->lli.daddr = buf_addr + (period_len * period_index); | 834 | desc->lli.daddr = buf_addr + (period_len * period_index); |
833 | desc->lli.ctrla = ctrla; | 835 | desc->lli.ctrla = ctrla; |
834 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | 836 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR |
@@ -852,26 +854,21 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
852 | * @buf_len: total number of bytes for the entire buffer | 854 | * @buf_len: total number of bytes for the entire buffer |
853 | * @period_len: number of bytes for each period | 855 | * @period_len: number of bytes for each period |
854 | * @direction: transfer direction, to or from device | 856 | * @direction: transfer direction, to or from device |
855 | * @flags: tx descriptor status flags | ||
856 | * @context: transfer context (ignored) | ||
857 | */ | 857 | */ |
858 | static struct dma_async_tx_descriptor * | 858 | static struct dma_async_tx_descriptor * |
859 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 859 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
860 | size_t period_len, enum dma_transfer_direction direction, | 860 | size_t period_len, enum dma_data_direction direction) |
861 | unsigned long flags, void *context) | ||
862 | { | 861 | { |
863 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 862 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
864 | struct at_dma_slave *atslave = chan->private; | 863 | struct at_dma_slave *atslave = chan->private; |
865 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; | ||
866 | struct at_desc *first = NULL; | 864 | struct at_desc *first = NULL; |
867 | struct at_desc *prev = NULL; | 865 | struct at_desc *prev = NULL; |
868 | unsigned long was_cyclic; | 866 | unsigned long was_cyclic; |
869 | unsigned int reg_width; | ||
870 | unsigned int periods = buf_len / period_len; | 867 | unsigned int periods = buf_len / period_len; |
871 | unsigned int i; | 868 | unsigned int i; |
872 | 869 | ||
873 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", | 870 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", |
874 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", | 871 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", |
875 | buf_addr, | 872 | buf_addr, |
876 | periods, buf_len, period_len); | 873 | periods, buf_len, period_len); |
877 | 874 | ||
@@ -886,13 +883,8 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
886 | return NULL; | 883 | return NULL; |
887 | } | 884 | } |
888 | 885 | ||
889 | if (sconfig->direction == DMA_MEM_TO_DEV) | ||
890 | reg_width = convert_buswidth(sconfig->dst_addr_width); | ||
891 | else | ||
892 | reg_width = convert_buswidth(sconfig->src_addr_width); | ||
893 | |||
894 | /* Check for too big/unaligned periods and unaligned DMA buffer */ | 886 | /* Check for too big/unaligned periods and unaligned DMA buffer */ |
895 | if (atc_dma_cyclic_check_values(reg_width, buf_addr, | 887 | if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr, |
896 | period_len, direction)) | 888 | period_len, direction)) |
897 | goto err_out; | 889 | goto err_out; |
898 | 890 | ||
@@ -904,8 +896,8 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
904 | if (!desc) | 896 | if (!desc) |
905 | goto err_desc_get; | 897 | goto err_desc_get; |
906 | 898 | ||
907 | if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, | 899 | if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr, |
908 | reg_width, period_len, direction)) | 900 | period_len, direction)) |
909 | goto err_desc_get; | 901 | goto err_desc_get; |
910 | 902 | ||
911 | atc_desc_chain(&first, &prev, desc); | 903 | atc_desc_chain(&first, &prev, desc); |
@@ -928,23 +920,6 @@ err_out: | |||
928 | return NULL; | 920 | return NULL; |
929 | } | 921 | } |
930 | 922 | ||
931 | static int set_runtime_config(struct dma_chan *chan, | ||
932 | struct dma_slave_config *sconfig) | ||
933 | { | ||
934 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
935 | |||
936 | /* Check if chan is configured for slave transfers */ | ||
937 | if (!chan->private) | ||
938 | return -EINVAL; | ||
939 | |||
940 | memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
941 | |||
942 | convert_burst(&atchan->dma_sconfig.src_maxburst); | ||
943 | convert_burst(&atchan->dma_sconfig.dst_maxburst); | ||
944 | |||
945 | return 0; | ||
946 | } | ||
947 | |||
948 | 923 | ||
949 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 924 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
950 | unsigned long arg) | 925 | unsigned long arg) |
@@ -952,29 +927,28 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
952 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 927 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
953 | struct at_dma *atdma = to_at_dma(chan->device); | 928 | struct at_dma *atdma = to_at_dma(chan->device); |
954 | int chan_id = atchan->chan_common.chan_id; | 929 | int chan_id = atchan->chan_common.chan_id; |
955 | unsigned long flags; | ||
956 | 930 | ||
957 | LIST_HEAD(list); | 931 | LIST_HEAD(list); |
958 | 932 | ||
959 | dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); | 933 | dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); |
960 | 934 | ||
961 | if (cmd == DMA_PAUSE) { | 935 | if (cmd == DMA_PAUSE) { |
962 | spin_lock_irqsave(&atchan->lock, flags); | 936 | spin_lock_bh(&atchan->lock); |
963 | 937 | ||
964 | dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); | 938 | dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); |
965 | set_bit(ATC_IS_PAUSED, &atchan->status); | 939 | set_bit(ATC_IS_PAUSED, &atchan->status); |
966 | 940 | ||
967 | spin_unlock_irqrestore(&atchan->lock, flags); | 941 | spin_unlock_bh(&atchan->lock); |
968 | } else if (cmd == DMA_RESUME) { | 942 | } else if (cmd == DMA_RESUME) { |
969 | if (!atc_chan_is_paused(atchan)) | 943 | if (!test_bit(ATC_IS_PAUSED, &atchan->status)) |
970 | return 0; | 944 | return 0; |
971 | 945 | ||
972 | spin_lock_irqsave(&atchan->lock, flags); | 946 | spin_lock_bh(&atchan->lock); |
973 | 947 | ||
974 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); | 948 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); |
975 | clear_bit(ATC_IS_PAUSED, &atchan->status); | 949 | clear_bit(ATC_IS_PAUSED, &atchan->status); |
976 | 950 | ||
977 | spin_unlock_irqrestore(&atchan->lock, flags); | 951 | spin_unlock_bh(&atchan->lock); |
978 | } else if (cmd == DMA_TERMINATE_ALL) { | 952 | } else if (cmd == DMA_TERMINATE_ALL) { |
979 | struct at_desc *desc, *_desc; | 953 | struct at_desc *desc, *_desc; |
980 | /* | 954 | /* |
@@ -983,7 +957,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
983 | * channel. We still have to poll the channel enable bit due | 957 | * channel. We still have to poll the channel enable bit due |
984 | * to AHB/HSB limitations. | 958 | * to AHB/HSB limitations. |
985 | */ | 959 | */ |
986 | spin_lock_irqsave(&atchan->lock, flags); | 960 | spin_lock_bh(&atchan->lock); |
987 | 961 | ||
988 | /* disabling channel: must also remove suspend state */ | 962 | /* disabling channel: must also remove suspend state */ |
989 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); | 963 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); |
@@ -1004,9 +978,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1004 | /* if channel dedicated to cyclic operations, free it */ | 978 | /* if channel dedicated to cyclic operations, free it */ |
1005 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | 979 | clear_bit(ATC_IS_CYCLIC, &atchan->status); |
1006 | 980 | ||
1007 | spin_unlock_irqrestore(&atchan->lock, flags); | 981 | spin_unlock_bh(&atchan->lock); |
1008 | } else if (cmd == DMA_SLAVE_CONFIG) { | ||
1009 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | ||
1010 | } else { | 982 | } else { |
1011 | return -ENXIO; | 983 | return -ENXIO; |
1012 | } | 984 | } |
@@ -1032,27 +1004,32 @@ atc_tx_status(struct dma_chan *chan, | |||
1032 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 1004 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1033 | dma_cookie_t last_used; | 1005 | dma_cookie_t last_used; |
1034 | dma_cookie_t last_complete; | 1006 | dma_cookie_t last_complete; |
1035 | unsigned long flags; | ||
1036 | enum dma_status ret; | 1007 | enum dma_status ret; |
1037 | 1008 | ||
1038 | spin_lock_irqsave(&atchan->lock, flags); | 1009 | spin_lock_bh(&atchan->lock); |
1039 | 1010 | ||
1040 | ret = dma_cookie_status(chan, cookie, txstate); | 1011 | last_complete = atchan->completed_cookie; |
1012 | last_used = chan->cookie; | ||
1013 | |||
1014 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1041 | if (ret != DMA_SUCCESS) { | 1015 | if (ret != DMA_SUCCESS) { |
1042 | atc_cleanup_descriptors(atchan); | 1016 | atc_cleanup_descriptors(atchan); |
1043 | 1017 | ||
1044 | ret = dma_cookie_status(chan, cookie, txstate); | 1018 | last_complete = atchan->completed_cookie; |
1045 | } | 1019 | last_used = chan->cookie; |
1046 | 1020 | ||
1047 | last_complete = chan->completed_cookie; | 1021 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
1048 | last_used = chan->cookie; | 1022 | } |
1049 | 1023 | ||
1050 | spin_unlock_irqrestore(&atchan->lock, flags); | 1024 | spin_unlock_bh(&atchan->lock); |
1051 | 1025 | ||
1052 | if (ret != DMA_SUCCESS) | 1026 | if (ret != DMA_SUCCESS) |
1053 | dma_set_residue(txstate, atc_first_active(atchan)->len); | 1027 | dma_set_tx_state(txstate, last_complete, last_used, |
1028 | atc_first_active(atchan)->len); | ||
1029 | else | ||
1030 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
1054 | 1031 | ||
1055 | if (atc_chan_is_paused(atchan)) | 1032 | if (test_bit(ATC_IS_PAUSED, &atchan->status)) |
1056 | ret = DMA_PAUSED; | 1033 | ret = DMA_PAUSED; |
1057 | 1034 | ||
1058 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", | 1035 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", |
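The right-hand side of this hunk reports status by comparing the caller's cookie against the channel's last completed and last issued cookies with dma_async_is_complete(), then packs the same pair into the dma_tx_state via dma_set_tx_state(). A simplified sketch of what that completeness test does (not this driver's code; DMA_SUCCESS is the status name used throughout this file):

#include <linux/dmaengine.h>

/* A cookie is complete if it falls outside the (last_complete, last_used]
 * window of still-pending submissions, allowing for signed wrap-around. */
static enum dma_status cookie_status_sketch(dma_cookie_t cookie,
                                            dma_cookie_t last_complete,
                                            dma_cookie_t last_used)
{
        if (last_complete <= last_used) {
                if (cookie <= last_complete || cookie > last_used)
                        return DMA_SUCCESS;
        } else {
                if (cookie <= last_complete && cookie > last_used)
                        return DMA_SUCCESS;
        }
        return DMA_IN_PROGRESS;
}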
@@ -1069,19 +1046,18 @@ atc_tx_status(struct dma_chan *chan, | |||
1069 | static void atc_issue_pending(struct dma_chan *chan) | 1046 | static void atc_issue_pending(struct dma_chan *chan) |
1070 | { | 1047 | { |
1071 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 1048 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1072 | unsigned long flags; | ||
1073 | 1049 | ||
1074 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | 1050 | dev_vdbg(chan2dev(chan), "issue_pending\n"); |
1075 | 1051 | ||
1076 | /* Not needed for cyclic transfers */ | 1052 | /* Not needed for cyclic transfers */ |
1077 | if (atc_chan_is_cyclic(atchan)) | 1053 | if (test_bit(ATC_IS_CYCLIC, &atchan->status)) |
1078 | return; | 1054 | return; |
1079 | 1055 | ||
1080 | spin_lock_irqsave(&atchan->lock, flags); | 1056 | spin_lock_bh(&atchan->lock); |
1081 | if (!atc_chan_is_enabled(atchan)) { | 1057 | if (!atc_chan_is_enabled(atchan)) { |
1082 | atc_advance_work(atchan); | 1058 | atc_advance_work(atchan); |
1083 | } | 1059 | } |
1084 | spin_unlock_irqrestore(&atchan->lock, flags); | 1060 | spin_unlock_bh(&atchan->lock); |
1085 | } | 1061 | } |
1086 | 1062 | ||
1087 | /** | 1063 | /** |
@@ -1097,7 +1073,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1097 | struct at_dma *atdma = to_at_dma(chan->device); | 1073 | struct at_dma *atdma = to_at_dma(chan->device); |
1098 | struct at_desc *desc; | 1074 | struct at_desc *desc; |
1099 | struct at_dma_slave *atslave; | 1075 | struct at_dma_slave *atslave; |
1100 | unsigned long flags; | ||
1101 | int i; | 1076 | int i; |
1102 | u32 cfg; | 1077 | u32 cfg; |
1103 | LIST_HEAD(tmp_list); | 1078 | LIST_HEAD(tmp_list); |
@@ -1141,11 +1116,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1141 | list_add_tail(&desc->desc_node, &tmp_list); | 1116 | list_add_tail(&desc->desc_node, &tmp_list); |
1142 | } | 1117 | } |
1143 | 1118 | ||
1144 | spin_lock_irqsave(&atchan->lock, flags); | 1119 | spin_lock_bh(&atchan->lock); |
1145 | atchan->descs_allocated = i; | 1120 | atchan->descs_allocated = i; |
1146 | list_splice(&tmp_list, &atchan->free_list); | 1121 | list_splice(&tmp_list, &atchan->free_list); |
1147 | dma_cookie_init(chan); | 1122 | atchan->completed_cookie = chan->cookie = 1; |
1148 | spin_unlock_irqrestore(&atchan->lock, flags); | 1123 | spin_unlock_bh(&atchan->lock); |
1149 | 1124 | ||
1150 | /* channel parameters */ | 1125 | /* channel parameters */ |
1151 | channel_writel(atchan, CFG, cfg); | 1126 | channel_writel(atchan, CFG, cfg); |
@@ -1192,56 +1167,6 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
1192 | 1167 | ||
1193 | /*-- Module Management -----------------------------------------------*/ | 1168 | /*-- Module Management -----------------------------------------------*/ |
1194 | 1169 | ||
1195 | /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */ | ||
1196 | static struct at_dma_platform_data at91sam9rl_config = { | ||
1197 | .nr_channels = 2, | ||
1198 | }; | ||
1199 | static struct at_dma_platform_data at91sam9g45_config = { | ||
1200 | .nr_channels = 8, | ||
1201 | }; | ||
1202 | |||
1203 | #if defined(CONFIG_OF) | ||
1204 | static const struct of_device_id atmel_dma_dt_ids[] = { | ||
1205 | { | ||
1206 | .compatible = "atmel,at91sam9rl-dma", | ||
1207 | .data = &at91sam9rl_config, | ||
1208 | }, { | ||
1209 | .compatible = "atmel,at91sam9g45-dma", | ||
1210 | .data = &at91sam9g45_config, | ||
1211 | }, { | ||
1212 | /* sentinel */ | ||
1213 | } | ||
1214 | }; | ||
1215 | |||
1216 | MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); | ||
1217 | #endif | ||
1218 | |||
1219 | static const struct platform_device_id atdma_devtypes[] = { | ||
1220 | { | ||
1221 | .name = "at91sam9rl_dma", | ||
1222 | .driver_data = (unsigned long) &at91sam9rl_config, | ||
1223 | }, { | ||
1224 | .name = "at91sam9g45_dma", | ||
1225 | .driver_data = (unsigned long) &at91sam9g45_config, | ||
1226 | }, { | ||
1227 | /* sentinel */ | ||
1228 | } | ||
1229 | }; | ||
1230 | |||
1231 | static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( | ||
1232 | struct platform_device *pdev) | ||
1233 | { | ||
1234 | if (pdev->dev.of_node) { | ||
1235 | const struct of_device_id *match; | ||
1236 | match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); | ||
1237 | if (match == NULL) | ||
1238 | return NULL; | ||
1239 | return match->data; | ||
1240 | } | ||
1241 | return (struct at_dma_platform_data *) | ||
1242 | platform_get_device_id(pdev)->driver_data; | ||
1243 | } | ||
1244 | |||
1245 | /** | 1170 | /** |
1246 | * at_dma_off - disable DMA controller | 1171 | * at_dma_off - disable DMA controller |
1247 | * @atdma: the Atmel HDAMC device | 1172 | * @atdma: the Atmel HDAMC device |
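With the OF match table and platform_device_id table removed above, the probe path below depends entirely on platform data registered by the board file. A hedged example of such a definition, limited to the two fields at_dma_probe() actually reads (the channel count is a placeholder, and the header providing struct at_dma_platform_data is assumed to be the mach/at_hdmac.h already pulled in by at_hdmac_regs.h):

#include <linux/dmaengine.h>
#include <mach/at_hdmac.h>

static struct at_dma_platform_data example_atdma_pdata = {
        .nr_channels = 8,       /* placeholder; must not exceed AT_DMA_MAX_NR_CHANNELS */
};

/* cap_mask is a dma_cap_mask_t, so it is filled at run time. */
static void example_atdma_pdata_init(void)
{
        dma_cap_set(DMA_MEMCPY, example_atdma_pdata.cap_mask);
        dma_cap_set(DMA_SLAVE, example_atdma_pdata.cap_mask);
}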
@@ -1260,23 +1185,18 @@ static void at_dma_off(struct at_dma *atdma) | |||
1260 | 1185 | ||
1261 | static int __init at_dma_probe(struct platform_device *pdev) | 1186 | static int __init at_dma_probe(struct platform_device *pdev) |
1262 | { | 1187 | { |
1188 | struct at_dma_platform_data *pdata; | ||
1263 | struct resource *io; | 1189 | struct resource *io; |
1264 | struct at_dma *atdma; | 1190 | struct at_dma *atdma; |
1265 | size_t size; | 1191 | size_t size; |
1266 | int irq; | 1192 | int irq; |
1267 | int err; | 1193 | int err; |
1268 | int i; | 1194 | int i; |
1269 | const struct at_dma_platform_data *plat_dat; | ||
1270 | 1195 | ||
1271 | /* setup platform data for each SoC */ | 1196 | /* get DMA Controller parameters from platform */ |
1272 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); | 1197 | pdata = pdev->dev.platform_data; |
1273 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); | 1198 | if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) |
1274 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); | 1199 | return -EINVAL; |
1275 | |||
1276 | /* get DMA parameters from controller type */ | ||
1277 | plat_dat = at_dma_get_driver_data(pdev); | ||
1278 | if (!plat_dat) | ||
1279 | return -ENODEV; | ||
1280 | 1200 | ||
1281 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1201 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1282 | if (!io) | 1202 | if (!io) |
@@ -1287,14 +1207,14 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1287 | return irq; | 1207 | return irq; |
1288 | 1208 | ||
1289 | size = sizeof(struct at_dma); | 1209 | size = sizeof(struct at_dma); |
1290 | size += plat_dat->nr_channels * sizeof(struct at_dma_chan); | 1210 | size += pdata->nr_channels * sizeof(struct at_dma_chan); |
1291 | atdma = kzalloc(size, GFP_KERNEL); | 1211 | atdma = kzalloc(size, GFP_KERNEL); |
1292 | if (!atdma) | 1212 | if (!atdma) |
1293 | return -ENOMEM; | 1213 | return -ENOMEM; |
1294 | 1214 | ||
1295 | /* discover transaction capabilities */ | 1215 | /* discover transaction capabilities from the platform data */ |
1296 | atdma->dma_common.cap_mask = plat_dat->cap_mask; | 1216 | atdma->dma_common.cap_mask = pdata->cap_mask; |
1297 | atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; | 1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1298 | 1218 | ||
1299 | size = resource_size(io); | 1219 | size = resource_size(io); |
1300 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { | 1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { |
@@ -1340,11 +1260,12 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1340 | 1260 | ||
1341 | /* initialize channels related values */ | 1261 | /* initialize channels related values */ |
1342 | INIT_LIST_HEAD(&atdma->dma_common.channels); | 1262 | INIT_LIST_HEAD(&atdma->dma_common.channels); |
1343 | for (i = 0; i < plat_dat->nr_channels; i++) { | 1263 | for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { |
1344 | struct at_dma_chan *atchan = &atdma->chan[i]; | 1264 | struct at_dma_chan *atchan = &atdma->chan[i]; |
1345 | 1265 | ||
1346 | atchan->chan_common.device = &atdma->dma_common; | 1266 | atchan->chan_common.device = &atdma->dma_common; |
1347 | dma_cookie_init(&atchan->chan_common); | 1267 | atchan->chan_common.cookie = atchan->completed_cookie = 1; |
1268 | atchan->chan_common.chan_id = i; | ||
1348 | list_add_tail(&atchan->chan_common.device_node, | 1269 | list_add_tail(&atchan->chan_common.device_node, |
1349 | &atdma->dma_common.channels); | 1270 | &atdma->dma_common.channels); |
1350 | 1271 | ||
@@ -1358,7 +1279,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1358 | 1279 | ||
1359 | tasklet_init(&atchan->tasklet, atc_tasklet, | 1280 | tasklet_init(&atchan->tasklet, atc_tasklet, |
1360 | (unsigned long)atchan); | 1281 | (unsigned long)atchan); |
1361 | atc_enable_chan_irq(atdma, i); | 1282 | atc_enable_irq(atchan); |
1362 | } | 1283 | } |
1363 | 1284 | ||
1364 | /* set base routines */ | 1285 | /* set base routines */ |
@@ -1372,20 +1293,22 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1372 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) | 1293 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) |
1373 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; | 1294 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; |
1374 | 1295 | ||
1375 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { | 1296 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) |
1376 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; | 1297 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; |
1377 | /* controller can do slave DMA: can trigger cyclic transfers */ | 1298 | |
1378 | dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); | 1299 | if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) |
1379 | atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; | 1300 | atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; |
1301 | |||
1302 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || | ||
1303 | dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) | ||
1380 | atdma->dma_common.device_control = atc_control; | 1304 | atdma->dma_common.device_control = atc_control; |
1381 | } | ||
1382 | 1305 | ||
1383 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1306 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1384 | 1307 | ||
1385 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", | 1308 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", |
1386 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", | 1309 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", |
1387 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", | 1310 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", |
1388 | plat_dat->nr_channels); | 1311 | atdma->dma_common.chancnt); |
1389 | 1312 | ||
1390 | dma_async_device_register(&atdma->dma_common); | 1313 | dma_async_device_register(&atdma->dma_common); |
1391 | 1314 | ||
@@ -1425,7 +1348,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) | |||
1425 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 1348 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1426 | 1349 | ||
1427 | /* Disable interrupts */ | 1350 | /* Disable interrupts */ |
1428 | atc_disable_chan_irq(atdma, chan->chan_id); | 1351 | atc_disable_irq(atchan); |
1429 | tasklet_disable(&atchan->tasklet); | 1352 | tasklet_disable(&atchan->tasklet); |
1430 | 1353 | ||
1431 | tasklet_kill(&atchan->tasklet); | 1354 | tasklet_kill(&atchan->tasklet); |
@@ -1454,112 +1377,27 @@ static void at_dma_shutdown(struct platform_device *pdev) | |||
1454 | clk_disable(atdma->clk); | 1377 | clk_disable(atdma->clk); |
1455 | } | 1378 | } |
1456 | 1379 | ||
1457 | static int at_dma_prepare(struct device *dev) | ||
1458 | { | ||
1459 | struct platform_device *pdev = to_platform_device(dev); | ||
1460 | struct at_dma *atdma = platform_get_drvdata(pdev); | ||
1461 | struct dma_chan *chan, *_chan; | ||
1462 | |||
1463 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | ||
1464 | device_node) { | ||
1465 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
1466 | /* wait for transaction completion (except in cyclic case) */ | ||
1467 | if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) | ||
1468 | return -EAGAIN; | ||
1469 | } | ||
1470 | return 0; | ||
1471 | } | ||
1472 | |||
1473 | static void atc_suspend_cyclic(struct at_dma_chan *atchan) | ||
1474 | { | ||
1475 | struct dma_chan *chan = &atchan->chan_common; | ||
1476 | |||
1477 | /* Channel should be paused by user | ||
1478 | * do it anyway even if it is not done already */ | ||
1479 | if (!atc_chan_is_paused(atchan)) { | ||
1480 | dev_warn(chan2dev(chan), | ||
1481 | "cyclic channel not paused, should be done by channel user\n"); | ||
1482 | atc_control(chan, DMA_PAUSE, 0); | ||
1483 | } | ||
1484 | |||
1485 | /* now preserve additional data for cyclic operations */ | ||
1486 | /* next descriptor address in the cyclic list */ | ||
1487 | atchan->save_dscr = channel_readl(atchan, DSCR); | ||
1488 | |||
1489 | vdbg_dump_regs(atchan); | ||
1490 | } | ||
1491 | |||
1492 | static int at_dma_suspend_noirq(struct device *dev) | 1380 | static int at_dma_suspend_noirq(struct device *dev) |
1493 | { | 1381 | { |
1494 | struct platform_device *pdev = to_platform_device(dev); | 1382 | struct platform_device *pdev = to_platform_device(dev); |
1495 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1383 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1496 | struct dma_chan *chan, *_chan; | ||
1497 | |||
1498 | /* preserve data */ | ||
1499 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | ||
1500 | device_node) { | ||
1501 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
1502 | 1384 | ||
1503 | if (atc_chan_is_cyclic(atchan)) | 1385 | at_dma_off(platform_get_drvdata(pdev)); |
1504 | atc_suspend_cyclic(atchan); | ||
1505 | atchan->save_cfg = channel_readl(atchan, CFG); | ||
1506 | } | ||
1507 | atdma->save_imr = dma_readl(atdma, EBCIMR); | ||
1508 | |||
1509 | /* disable DMA controller */ | ||
1510 | at_dma_off(atdma); | ||
1511 | clk_disable(atdma->clk); | 1386 | clk_disable(atdma->clk); |
1512 | return 0; | 1387 | return 0; |
1513 | } | 1388 | } |
1514 | 1389 | ||
1515 | static void atc_resume_cyclic(struct at_dma_chan *atchan) | ||
1516 | { | ||
1517 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); | ||
1518 | |||
1519 | /* restore channel status for cyclic descriptors list: | ||
1520 | * next descriptor in the cyclic list at the time of suspend */ | ||
1521 | channel_writel(atchan, SADDR, 0); | ||
1522 | channel_writel(atchan, DADDR, 0); | ||
1523 | channel_writel(atchan, CTRLA, 0); | ||
1524 | channel_writel(atchan, CTRLB, 0); | ||
1525 | channel_writel(atchan, DSCR, atchan->save_dscr); | ||
1526 | dma_writel(atdma, CHER, atchan->mask); | ||
1527 | |||
1528 | /* channel pause status should be removed by channel user | ||
1529 | * We cannot take the initiative to do it here */ | ||
1530 | |||
1531 | vdbg_dump_regs(atchan); | ||
1532 | } | ||
1533 | |||
1534 | static int at_dma_resume_noirq(struct device *dev) | 1390 | static int at_dma_resume_noirq(struct device *dev) |
1535 | { | 1391 | { |
1536 | struct platform_device *pdev = to_platform_device(dev); | 1392 | struct platform_device *pdev = to_platform_device(dev); |
1537 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1393 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1538 | struct dma_chan *chan, *_chan; | ||
1539 | 1394 | ||
1540 | /* bring back DMA controller */ | ||
1541 | clk_enable(atdma->clk); | 1395 | clk_enable(atdma->clk); |
1542 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1396 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1543 | |||
1544 | /* clear any pending interrupt */ | ||
1545 | while (dma_readl(atdma, EBCISR)) | ||
1546 | cpu_relax(); | ||
1547 | |||
1548 | /* restore saved data */ | ||
1549 | dma_writel(atdma, EBCIER, atdma->save_imr); | ||
1550 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | ||
1551 | device_node) { | ||
1552 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
1553 | |||
1554 | channel_writel(atchan, CFG, atchan->save_cfg); | ||
1555 | if (atc_chan_is_cyclic(atchan)) | ||
1556 | atc_resume_cyclic(atchan); | ||
1557 | } | ||
1558 | return 0; | 1397 | return 0; |
1559 | } | 1398 | } |
1560 | 1399 | ||
1561 | static const struct dev_pm_ops at_dma_dev_pm_ops = { | 1400 | static const struct dev_pm_ops at_dma_dev_pm_ops = { |
1562 | .prepare = at_dma_prepare, | ||
1563 | .suspend_noirq = at_dma_suspend_noirq, | 1401 | .suspend_noirq = at_dma_suspend_noirq, |
1564 | .resume_noirq = at_dma_resume_noirq, | 1402 | .resume_noirq = at_dma_resume_noirq, |
1565 | }; | 1403 | }; |
@@ -1567,11 +1405,9 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = { | |||
1567 | static struct platform_driver at_dma_driver = { | 1405 | static struct platform_driver at_dma_driver = { |
1568 | .remove = __exit_p(at_dma_remove), | 1406 | .remove = __exit_p(at_dma_remove), |
1569 | .shutdown = at_dma_shutdown, | 1407 | .shutdown = at_dma_shutdown, |
1570 | .id_table = atdma_devtypes, | ||
1571 | .driver = { | 1408 | .driver = { |
1572 | .name = "at_hdmac", | 1409 | .name = "at_hdmac", |
1573 | .pm = &at_dma_dev_pm_ops, | 1410 | .pm = &at_dma_dev_pm_ops, |
1574 | .of_match_table = of_match_ptr(atmel_dma_dt_ids), | ||
1575 | }, | 1411 | }, |
1576 | }; | 1412 | }; |
1577 | 1413 | ||
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 116e4adffb0..087dbf1dd39 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #ifndef AT_HDMAC_REGS_H | 11 | #ifndef AT_HDMAC_REGS_H |
12 | #define AT_HDMAC_REGS_H | 12 | #define AT_HDMAC_REGS_H |
13 | 13 | ||
14 | #include <linux/platform_data/dma-atmel.h> | 14 | #include <mach/at_hdmac.h> |
15 | 15 | ||
16 | #define AT_DMA_MAX_NR_CHANNELS 8 | 16 | #define AT_DMA_MAX_NR_CHANNELS 8 |
17 | 17 | ||
@@ -87,26 +87,7 @@ | |||
87 | /* Bitfields in CTRLA */ | 87 | /* Bitfields in CTRLA */ |
88 | #define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ | 88 | #define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ |
89 | #define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ | 89 | #define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ |
90 | #define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */ | 90 | /* Chunk Transfer size definitions are in at_hdmac.h */ |
91 | #define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16)) | ||
92 | #define ATC_SCSIZE_1 (0x0 << 16) | ||
93 | #define ATC_SCSIZE_4 (0x1 << 16) | ||
94 | #define ATC_SCSIZE_8 (0x2 << 16) | ||
95 | #define ATC_SCSIZE_16 (0x3 << 16) | ||
96 | #define ATC_SCSIZE_32 (0x4 << 16) | ||
97 | #define ATC_SCSIZE_64 (0x5 << 16) | ||
98 | #define ATC_SCSIZE_128 (0x6 << 16) | ||
99 | #define ATC_SCSIZE_256 (0x7 << 16) | ||
100 | #define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */ | ||
101 | #define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20)) | ||
102 | #define ATC_DCSIZE_1 (0x0 << 20) | ||
103 | #define ATC_DCSIZE_4 (0x1 << 20) | ||
104 | #define ATC_DCSIZE_8 (0x2 << 20) | ||
105 | #define ATC_DCSIZE_16 (0x3 << 20) | ||
106 | #define ATC_DCSIZE_32 (0x4 << 20) | ||
107 | #define ATC_DCSIZE_64 (0x5 << 20) | ||
108 | #define ATC_DCSIZE_128 (0x6 << 20) | ||
109 | #define ATC_DCSIZE_256 (0x7 << 20) | ||
110 | #define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ | 91 | #define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ |
111 | #define ATC_SRC_WIDTH(x) ((x) << 24) | 92 | #define ATC_SRC_WIDTH(x) ((x) << 24) |
112 | #define ATC_SRC_WIDTH_BYTE (0x0 << 24) | 93 | #define ATC_SRC_WIDTH_BYTE (0x0 << 24) |
@@ -223,11 +204,8 @@ enum atc_status { | |||
223 | * @status: transmit status information from irq/prep* functions | 204 | * @status: transmit status information from irq/prep* functions |
224 | * to tasklet (use atomic operations) | 205 | * to tasklet (use atomic operations) |
225 | * @tasklet: bottom half to finish transaction work | 206 | * @tasklet: bottom half to finish transaction work |
226 | * @save_cfg: configuration register that is saved on suspend/resume cycle | ||
227 | * @save_dscr: for cyclic operations, preserve next descriptor address in | ||
228 | * the cyclic list on suspend/resume cycle | ||
229 | * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG | ||
230 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 207 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
208 | * @completed_cookie: identifier for the most recently completed operation | ||
231 | * @active_list: list of descriptors dmaengine is being running on | 209 | * @active_list: list of descriptors dmaengine is being running on |
232 | * @queue: list of descriptors ready to be submitted to engine | 210 | * @queue: list of descriptors ready to be submitted to engine |
233 | * @free_list: list of descriptors usable by the channel | 211 | * @free_list: list of descriptors usable by the channel |
@@ -240,13 +218,11 @@ struct at_dma_chan { | |||
240 | u8 mask; | 218 | u8 mask; |
241 | unsigned long status; | 219 | unsigned long status; |
242 | struct tasklet_struct tasklet; | 220 | struct tasklet_struct tasklet; |
243 | u32 save_cfg; | ||
244 | u32 save_dscr; | ||
245 | struct dma_slave_config dma_sconfig; | ||
246 | 221 | ||
247 | spinlock_t lock; | 222 | spinlock_t lock; |
248 | 223 | ||
249 | /* these other elements are all protected by lock */ | 224 | /* these other elements are all protected by lock */ |
225 | dma_cookie_t completed_cookie; | ||
250 | struct list_head active_list; | 226 | struct list_head active_list; |
251 | struct list_head queue; | 227 | struct list_head queue; |
252 | struct list_head free_list; | 228 | struct list_head free_list; |
@@ -264,46 +240,14 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) | |||
264 | return container_of(dchan, struct at_dma_chan, chan_common); | 240 | return container_of(dchan, struct at_dma_chan, chan_common); |
265 | } | 241 | } |
266 | 242 | ||
267 | /* | ||
268 | * Fix sconfig's burst size according to at_hdmac. We need to convert them as: | ||
269 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7. | ||
270 | * | ||
271 | * This can be done by finding most significant bit set. | ||
272 | */ | ||
273 | static inline void convert_burst(u32 *maxburst) | ||
274 | { | ||
275 | if (*maxburst > 1) | ||
276 | *maxburst = fls(*maxburst) - 2; | ||
277 | else | ||
278 | *maxburst = 0; | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * Fix sconfig's bus width according to at_hdmac. | ||
283 | * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2. | ||
284 | */ | ||
285 | static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) | ||
286 | { | ||
287 | switch (addr_width) { | ||
288 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
289 | return 1; | ||
290 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
291 | return 2; | ||
292 | default: | ||
293 | /* For 1 byte width or fallback */ | ||
294 | return 0; | ||
295 | } | ||
296 | } | ||
297 | 243 | ||
298 | /*-- Controller ------------------------------------------------------*/ | 244 | /*-- Controller ------------------------------------------------------*/ |
299 | 245 | ||
300 | /** | 246 | /** |
301 | * struct at_dma - internal representation of an Atmel HDMA Controller | 247 | * struct at_dma - internal representation of an Atmel HDMA Controller |
302 | * @chan_common: common dmaengine dma_device object members | 248 | * @chan_common: common dmaengine dma_device object members |
303 | * @atdma_devtype: identifier of DMA controller compatibility | ||
304 | * @ch_regs: memory mapped register base | 249 | * @ch_regs: memory mapped register base |
305 | * @clk: dma controller clock | 250 | * @clk: dma controller clock |
306 | * @save_imr: interrupt mask register that is saved on suspend/resume cycle | ||
307 | * @all_chan_mask: all channels availlable in a mask | 251 | * @all_chan_mask: all channels availlable in a mask |
308 | * @dma_desc_pool: base of DMA descriptor region (DMA address) | 252 | * @dma_desc_pool: base of DMA descriptor region (DMA address) |
309 | * @chan: channels table to store at_dma_chan structures | 253 | * @chan: channels table to store at_dma_chan structures |
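The removed convert_burst() comment spells out the encoding it implements: a maxburst of 1 maps to 0, 4 to 1, 8 to 2, and so on up to 256 mapping to 7, which is fls(maxburst) - 2 for any value above 1. A quick worked check of that arithmetic:

#include <linux/bitops.h>
#include <linux/types.h>

/* Same arithmetic as the removed helper:
 *   16  -> fls(16)  - 2 = 5 - 2 = 3
 *   256 -> fls(256) - 2 = 9 - 2 = 7
 *   1   -> 0 (special-cased, since fls(1) - 2 would underflow) */
static u32 burst_to_csize(u32 maxburst)
{
        return (maxburst > 1) ? fls(maxburst) - 2 : 0;
}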
@@ -312,7 +256,6 @@ struct at_dma { | |||
312 | struct dma_device dma_common; | 256 | struct dma_device dma_common; |
313 | void __iomem *regs; | 257 | void __iomem *regs; |
314 | struct clk *clk; | 258 | struct clk *clk; |
315 | u32 save_imr; | ||
316 | 259 | ||
317 | u8 all_chan_mask; | 260 | u8 all_chan_mask; |
318 | 261 | ||
@@ -376,27 +319,28 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) | |||
376 | } | 319 | } |
377 | 320 | ||
378 | 321 | ||
379 | static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) | 322 | static void atc_setup_irq(struct at_dma_chan *atchan, int on) |
380 | { | 323 | { |
381 | u32 ebci; | 324 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); |
325 | u32 ebci; | ||
382 | 326 | ||
383 | /* enable interrupts on buffer transfer completion & error */ | 327 | /* enable interrupts on buffer transfer completion & error */ |
384 | ebci = AT_DMA_BTC(chan_id) | 328 | ebci = AT_DMA_BTC(atchan->chan_common.chan_id) |
385 | | AT_DMA_ERR(chan_id); | 329 | | AT_DMA_ERR(atchan->chan_common.chan_id); |
386 | if (on) | 330 | if (on) |
387 | dma_writel(atdma, EBCIER, ebci); | 331 | dma_writel(atdma, EBCIER, ebci); |
388 | else | 332 | else |
389 | dma_writel(atdma, EBCIDR, ebci); | 333 | dma_writel(atdma, EBCIDR, ebci); |
390 | } | 334 | } |
391 | 335 | ||
392 | static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) | 336 | static inline void atc_enable_irq(struct at_dma_chan *atchan) |
393 | { | 337 | { |
394 | atc_setup_irq(atdma, chan_id, 1); | 338 | atc_setup_irq(atchan, 1); |
395 | } | 339 | } |
396 | 340 | ||
397 | static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) | 341 | static inline void atc_disable_irq(struct at_dma_chan *atchan) |
398 | { | 342 | { |
399 | atc_setup_irq(atdma, chan_id, 0); | 343 | atc_setup_irq(atchan, 0); |
400 | } | 344 | } |
401 | 345 | ||
402 | 346 | ||
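Both versions of atc_setup_irq() build the same per-channel mask; the hunk only changes whether the channel id is passed in directly or derived from the at_dma_chan. The mask itself, sketched with the driver's bit macros (their definitions live earlier in this header):

/* Buffer-transfer-complete and error bits for one channel, as written to
 * EBCIER (enable) or EBCIDR (disable). AT_DMA_BTC()/AT_DMA_ERR() are the
 * per-channel bit macros from at_hdmac_regs.h. */
static inline u32 atc_chan_irq_mask(int chan_id)
{
        return AT_DMA_BTC(chan_id) | AT_DMA_ERR(chan_id);
}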
@@ -411,23 +355,6 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) | |||
411 | return !!(dma_readl(atdma, CHSR) & atchan->mask); | 355 | return !!(dma_readl(atdma, CHSR) & atchan->mask); |
412 | } | 356 | } |
413 | 357 | ||
414 | /** | ||
415 | * atc_chan_is_paused - test channel pause/resume status | ||
416 | * @atchan: channel we want to test status | ||
417 | */ | ||
418 | static inline int atc_chan_is_paused(struct at_dma_chan *atchan) | ||
419 | { | ||
420 | return test_bit(ATC_IS_PAUSED, &atchan->status); | ||
421 | } | ||
422 | |||
423 | /** | ||
424 | * atc_chan_is_cyclic - test if given channel has cyclic property set | ||
425 | * @atchan: channel we want to test status | ||
426 | */ | ||
427 | static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) | ||
428 | { | ||
429 | return test_bit(ATC_IS_CYCLIC, &atchan->status); | ||
430 | } | ||
431 | 358 | ||
432 | /** | 359 | /** |
433 | * set_desc_eol - set end-of-link to descriptor so it will end transfer | 360 | * set_desc_eol - set end-of-link to descriptor so it will end transfer |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index aa384e53b7a..4234f416ef1 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <mach/coh901318.h> | 24 | #include <mach/coh901318.h> |
25 | 25 | ||
26 | #include "coh901318_lli.h" | 26 | #include "coh901318_lli.h" |
27 | #include "dmaengine.h" | ||
28 | 27 | ||
29 | #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) | 28 | #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) |
30 | 29 | ||
@@ -40,7 +39,7 @@ struct coh901318_desc { | |||
40 | struct scatterlist *sg; | 39 | struct scatterlist *sg; |
41 | unsigned int sg_len; | 40 | unsigned int sg_len; |
42 | struct coh901318_lli *lli; | 41 | struct coh901318_lli *lli; |
43 | enum dma_transfer_direction dir; | 42 | enum dma_data_direction dir; |
44 | unsigned long flags; | 43 | unsigned long flags; |
45 | u32 head_config; | 44 | u32 head_config; |
46 | u32 head_ctrl; | 45 | u32 head_ctrl; |
@@ -60,6 +59,7 @@ struct coh901318_base { | |||
60 | struct coh901318_chan { | 59 | struct coh901318_chan { |
61 | spinlock_t lock; | 60 | spinlock_t lock; |
62 | int allocated; | 61 | int allocated; |
62 | int completed; | ||
63 | int id; | 63 | int id; |
64 | int stopped; | 64 | int stopped; |
65 | 65 | ||
@@ -104,6 +104,13 @@ static void coh901318_list_print(struct coh901318_chan *cohc, | |||
104 | static struct coh901318_base *debugfs_dma_base; | 104 | static struct coh901318_base *debugfs_dma_base; |
105 | static struct dentry *dma_dentry; | 105 | static struct dentry *dma_dentry; |
106 | 106 | ||
107 | static int coh901318_debugfs_open(struct inode *inode, struct file *file) | ||
108 | { | ||
109 | |||
110 | file->private_data = inode->i_private; | ||
111 | return 0; | ||
112 | } | ||
113 | |||
107 | static int coh901318_debugfs_read(struct file *file, char __user *buf, | 114 | static int coh901318_debugfs_read(struct file *file, char __user *buf, |
108 | size_t count, loff_t *f_pos) | 115 | size_t count, loff_t *f_pos) |
109 | { | 116 | { |
@@ -151,7 +158,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf, | |||
151 | 158 | ||
152 | static const struct file_operations coh901318_debugfs_status_operations = { | 159 | static const struct file_operations coh901318_debugfs_status_operations = { |
153 | .owner = THIS_MODULE, | 160 | .owner = THIS_MODULE, |
154 | .open = simple_open, | 161 | .open = coh901318_debugfs_open, |
155 | .read = coh901318_debugfs_read, | 162 | .read = coh901318_debugfs_read, |
156 | .llseek = default_llseek, | 163 | .llseek = default_llseek, |
157 | }; | 164 | }; |
@@ -311,6 +318,20 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc, | |||
311 | 318 | ||
312 | return 0; | 319 | return 0; |
313 | } | 320 | } |
321 | static dma_cookie_t | ||
322 | coh901318_assign_cookie(struct coh901318_chan *cohc, | ||
323 | struct coh901318_desc *cohd) | ||
324 | { | ||
325 | dma_cookie_t cookie = cohc->chan.cookie; | ||
326 | |||
327 | if (++cookie < 0) | ||
328 | cookie = 1; | ||
329 | |||
330 | cohc->chan.cookie = cookie; | ||
331 | cohd->desc.cookie = cookie; | ||
332 | |||
333 | return cookie; | ||
334 | } | ||
314 | 335 | ||
315 | static struct coh901318_desc * | 336 | static struct coh901318_desc * |
316 | coh901318_desc_get(struct coh901318_chan *cohc) | 337 | coh901318_desc_get(struct coh901318_chan *cohc) |
@@ -684,7 +705,7 @@ static void dma_tasklet(unsigned long data) | |||
684 | callback_param = cohd_fin->desc.callback_param; | 705 | callback_param = cohd_fin->desc.callback_param; |
685 | 706 | ||
686 | /* sign this job as completed on the channel */ | 707 | /* sign this job as completed on the channel */ |
687 | dma_cookie_complete(&cohd_fin->desc); | 708 | cohc->completed = cohd_fin->desc.cookie; |
688 | 709 | ||
689 | /* release the lli allocation and remove the descriptor */ | 710 | /* release the lli allocation and remove the descriptor */ |
690 | coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); | 711 | coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); |
@@ -908,7 +929,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan) | |||
908 | coh901318_config(cohc, NULL); | 929 | coh901318_config(cohc, NULL); |
909 | 930 | ||
910 | cohc->allocated = 1; | 931 | cohc->allocated = 1; |
911 | dma_cookie_init(chan); | 932 | cohc->completed = chan->cookie = 1; |
912 | 933 | ||
913 | spin_unlock_irqrestore(&cohc->lock, flags); | 934 | spin_unlock_irqrestore(&cohc->lock, flags); |
914 | 935 | ||
@@ -945,16 +966,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx) | |||
945 | desc); | 966 | desc); |
946 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); | 967 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); |
947 | unsigned long flags; | 968 | unsigned long flags; |
948 | dma_cookie_t cookie; | ||
949 | 969 | ||
950 | spin_lock_irqsave(&cohc->lock, flags); | 970 | spin_lock_irqsave(&cohc->lock, flags); |
951 | cookie = dma_cookie_assign(tx); | 971 | |
972 | tx->cookie = coh901318_assign_cookie(cohc, cohd); | ||
952 | 973 | ||
953 | coh901318_desc_queue(cohc, cohd); | 974 | coh901318_desc_queue(cohc, cohd); |
954 | 975 | ||
955 | spin_unlock_irqrestore(&cohc->lock, flags); | 976 | spin_unlock_irqrestore(&cohc->lock, flags); |
956 | 977 | ||
957 | return cookie; | 978 | return tx->cookie; |
958 | } | 979 | } |
959 | 980 | ||
960 | static struct dma_async_tx_descriptor * | 981 | static struct dma_async_tx_descriptor * |
@@ -1013,8 +1034,8 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
1013 | 1034 | ||
1014 | static struct dma_async_tx_descriptor * | 1035 | static struct dma_async_tx_descriptor * |
1015 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 1036 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
1016 | unsigned int sg_len, enum dma_transfer_direction direction, | 1037 | unsigned int sg_len, enum dma_data_direction direction, |
1017 | unsigned long flags, void *context) | 1038 | unsigned long flags) |
1018 | { | 1039 | { |
1019 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1040 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1020 | struct coh901318_lli *lli; | 1041 | struct coh901318_lli *lli; |
@@ -1033,7 +1054,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1033 | 1054 | ||
1034 | if (!sgl) | 1055 | if (!sgl) |
1035 | goto out; | 1056 | goto out; |
1036 | if (sg_dma_len(sgl) == 0) | 1057 | if (sgl->length == 0) |
1037 | goto out; | 1058 | goto out; |
1038 | 1059 | ||
1039 | spin_lock_irqsave(&cohc->lock, flg); | 1060 | spin_lock_irqsave(&cohc->lock, flg); |
@@ -1056,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1056 | ctrl_last |= cohc->runtime_ctrl; | 1077 | ctrl_last |= cohc->runtime_ctrl; |
1057 | ctrl |= cohc->runtime_ctrl; | 1078 | ctrl |= cohc->runtime_ctrl; |
1058 | 1079 | ||
1059 | if (direction == DMA_MEM_TO_DEV) { | 1080 | if (direction == DMA_TO_DEVICE) { |
1060 | u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | | 1081 | u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | |
1061 | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; | 1082 | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; |
1062 | 1083 | ||
@@ -1064,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1064 | ctrl_chained |= tx_flags; | 1085 | ctrl_chained |= tx_flags; |
1065 | ctrl_last |= tx_flags; | 1086 | ctrl_last |= tx_flags; |
1066 | ctrl |= tx_flags; | 1087 | ctrl |= tx_flags; |
1067 | } else if (direction == DMA_DEV_TO_MEM) { | 1088 | } else if (direction == DMA_FROM_DEVICE) { |
1068 | u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | | 1089 | u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | |
1069 | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; | 1090 | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; |
1070 | 1091 | ||
@@ -1144,12 +1165,17 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
1144 | struct dma_tx_state *txstate) | 1165 | struct dma_tx_state *txstate) |
1145 | { | 1166 | { |
1146 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1167 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1147 | enum dma_status ret; | 1168 | dma_cookie_t last_used; |
1169 | dma_cookie_t last_complete; | ||
1170 | int ret; | ||
1148 | 1171 | ||
1149 | ret = dma_cookie_status(chan, cookie, txstate); | 1172 | last_complete = cohc->completed; |
1150 | /* FIXME: should be conditional on ret != DMA_SUCCESS? */ | 1173 | last_used = chan->cookie; |
1151 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); | ||
1152 | 1174 | ||
1175 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1176 | |||
1177 | dma_set_tx_state(txstate, last_complete, last_used, | ||
1178 | coh901318_get_bytes_left(chan)); | ||
1153 | if (ret == DMA_IN_PROGRESS && cohc->stopped) | 1179 | if (ret == DMA_IN_PROGRESS && cohc->stopped) |
1154 | ret = DMA_PAUSED; | 1180 | ret = DMA_PAUSED; |
1155 | 1181 | ||
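The open-coded status path above snapshots the channel's last completed and last issued cookies, lets dma_async_is_complete() classify the queried one, and then reports the pair plus the residue through dma_set_tx_state(). For reference, the wrap-around-aware comparison dma_async_is_complete() performs is roughly the following (restated from include/linux/dmaengine.h of this era):

	static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
				dma_cookie_t last_complete, dma_cookie_t last_used)
	{
		if (last_complete <= last_used) {
			/* no wrap between completed and issued cookies */
			if ((cookie <= last_complete) || (cookie > last_used))
				return DMA_SUCCESS;
		} else {
			/* the cookie counter has wrapped */
			if ((cookie <= last_complete) && (cookie > last_used))
				return DMA_SUCCESS;
		}
		return DMA_IN_PROGRESS;
	}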
@@ -1248,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, | |||
1248 | int i = 0; | 1274 | int i = 0; |
1249 | 1275 | ||
1250 | /* We only support mem to per or per to mem transfers */ | 1276 | /* We only support mem to per or per to mem transfers */ |
1251 | if (config->direction == DMA_DEV_TO_MEM) { | 1277 | if (config->direction == DMA_FROM_DEVICE) { |
1252 | addr = config->src_addr; | 1278 | addr = config->src_addr; |
1253 | addr_width = config->src_addr_width; | 1279 | addr_width = config->src_addr_width; |
1254 | maxburst = config->src_maxburst; | 1280 | maxburst = config->src_maxburst; |
1255 | } else if (config->direction == DMA_MEM_TO_DEV) { | 1281 | } else if (config->direction == DMA_TO_DEVICE) { |
1256 | addr = config->dst_addr; | 1282 | addr = config->dst_addr; |
1257 | addr_width = config->dst_addr_width; | 1283 | addr_width = config->dst_addr_width; |
1258 | maxburst = config->dst_maxburst; | 1284 | maxburst = config->dst_maxburst; |
@@ -1438,32 +1464,34 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1438 | 1464 | ||
1439 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1465 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1440 | if (!io) | 1466 | if (!io) |
1441 | return -ENODEV; | 1467 | goto err_get_resource; |
1442 | 1468 | ||
1443 | /* Map DMA controller registers to virtual memory */ | 1469 | /* Map DMA controller registers to virtual memory */ |
1444 | if (devm_request_mem_region(&pdev->dev, | 1470 | if (request_mem_region(io->start, |
1445 | io->start, | 1471 | resource_size(io), |
1446 | resource_size(io), | 1472 | pdev->dev.driver->name) == NULL) { |
1447 | pdev->dev.driver->name) == NULL) | 1473 | err = -EBUSY; |
1448 | return -ENOMEM; | 1474 | goto err_request_mem; |
1475 | } | ||
1449 | 1476 | ||
1450 | pdata = pdev->dev.platform_data; | 1477 | pdata = pdev->dev.platform_data; |
1451 | if (!pdata) | 1478 | if (!pdata) |
1452 | return -ENODEV; | 1479 | goto err_no_platformdata; |
1453 | 1480 | ||
1454 | base = devm_kzalloc(&pdev->dev, | 1481 | base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + |
1455 | ALIGN(sizeof(struct coh901318_base), 4) + | 1482 | pdata->max_channels * |
1456 | pdata->max_channels * | 1483 | sizeof(struct coh901318_chan), |
1457 | sizeof(struct coh901318_chan), | 1484 | GFP_KERNEL); |
1458 | GFP_KERNEL); | ||
1459 | if (!base) | 1485 | if (!base) |
1460 | return -ENOMEM; | 1486 | goto err_alloc_coh_dma_channels; |
1461 | 1487 | ||
1462 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); | 1488 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); |
1463 | 1489 | ||
1464 | base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io)); | 1490 | base->virtbase = ioremap(io->start, resource_size(io)); |
1465 | if (!base->virtbase) | 1491 | if (!base->virtbase) { |
1466 | return -ENOMEM; | 1492 | err = -ENOMEM; |
1493 | goto err_no_ioremap; | ||
1494 | } | ||
1467 | 1495 | ||
1468 | base->dev = &pdev->dev; | 1496 | base->dev = &pdev->dev; |
1469 | base->platform = pdata; | 1497 | base->platform = pdata; |
@@ -1472,20 +1500,25 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1472 | 1500 | ||
1473 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); | 1501 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); |
1474 | 1502 | ||
1503 | platform_set_drvdata(pdev, base); | ||
1504 | |||
1475 | irq = platform_get_irq(pdev, 0); | 1505 | irq = platform_get_irq(pdev, 0); |
1476 | if (irq < 0) | 1506 | if (irq < 0) |
1477 | return irq; | 1507 | goto err_no_irq; |
1478 | 1508 | ||
1479 | err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, | 1509 | err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, |
1480 | "coh901318", base); | 1510 | "coh901318", base); |
1481 | if (err) | 1511 | if (err) { |
1482 | return err; | 1512 | dev_crit(&pdev->dev, |
1513 | "Cannot allocate IRQ for DMA controller!\n"); | ||
1514 | goto err_request_irq; | ||
1515 | } | ||
1483 | 1516 | ||
1484 | err = coh901318_pool_create(&base->pool, &pdev->dev, | 1517 | err = coh901318_pool_create(&base->pool, &pdev->dev, |
1485 | sizeof(struct coh901318_lli), | 1518 | sizeof(struct coh901318_lli), |
1486 | 32); | 1519 | 32); |
1487 | if (err) | 1520 | if (err) |
1488 | return err; | 1521 | goto err_pool_create; |
1489 | 1522 | ||
1490 | /* init channels for device transfers */ | 1523 | /* init channels for device transfers */ |
1491 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, | 1524 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, |
@@ -1531,7 +1564,6 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1531 | if (err) | 1564 | if (err) |
1532 | goto err_register_memcpy; | 1565 | goto err_register_memcpy; |
1533 | 1566 | ||
1534 | platform_set_drvdata(pdev, base); | ||
1535 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", | 1567 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", |
1536 | (u32) base->virtbase); | 1568 | (u32) base->virtbase); |
1537 | 1569 | ||
@@ -1541,6 +1573,19 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
1541 | dma_async_device_unregister(&base->dma_slave); | 1573 | dma_async_device_unregister(&base->dma_slave); |
1542 | err_register_slave: | 1574 | err_register_slave: |
1543 | coh901318_pool_destroy(&base->pool); | 1575 | coh901318_pool_destroy(&base->pool); |
1576 | err_pool_create: | ||
1577 | free_irq(platform_get_irq(pdev, 0), base); | ||
1578 | err_request_irq: | ||
1579 | err_no_irq: | ||
1580 | iounmap(base->virtbase); | ||
1581 | err_no_ioremap: | ||
1582 | kfree(base); | ||
1583 | err_alloc_coh_dma_channels: | ||
1584 | err_no_platformdata: | ||
1585 | release_mem_region(pdev->resource->start, | ||
1586 | resource_size(pdev->resource)); | ||
1587 | err_request_mem: | ||
1588 | err_get_resource: | ||
1544 | return err; | 1589 | return err; |
1545 | } | 1590 | } |
1546 | 1591 | ||
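The restored probe() error path is the classic goto-unwind idiom: resources are released in reverse order of acquisition, and labels with nothing of their own to undo (err_no_irq, err_no_platformdata, err_get_resource) simply fall through to the shared tail. The general shape, with illustrative names only:

	static int example_probe(struct platform_device *pdev)
	{
		int err;

		err = acquire_first(pdev);	/* e.g. request_mem_region() */
		if (err)
			goto err_first;

		err = acquire_second(pdev);	/* e.g. ioremap(), request_irq() */
		if (err)
			goto err_second;

		return 0;

	err_second:
		release_first(pdev);		/* undo in reverse order */
	err_first:
		return err;
	}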
@@ -1551,6 +1596,11 @@ static int __exit coh901318_remove(struct platform_device *pdev) | |||
1551 | dma_async_device_unregister(&base->dma_memcpy); | 1596 | dma_async_device_unregister(&base->dma_memcpy); |
1552 | dma_async_device_unregister(&base->dma_slave); | 1597 | dma_async_device_unregister(&base->dma_slave); |
1553 | coh901318_pool_destroy(&base->pool); | 1598 | coh901318_pool_destroy(&base->pool); |
1599 | free_irq(platform_get_irq(pdev, 0), base); | ||
1600 | iounmap(base->virtbase); | ||
1601 | kfree(base); | ||
1602 | release_mem_region(pdev->resource->start, | ||
1603 | resource_size(pdev->resource)); | ||
1554 | return 0; | 1604 | return 0; |
1555 | } | 1605 | } |
1556 | 1606 | ||
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index 780e0429b38..9f7e0e6a7ee 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c | |||
@@ -7,10 +7,11 @@ | |||
7 | * Author: Per Friden <per.friden@stericsson.com> | 7 | * Author: Per Friden <per.friden@stericsson.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/dma-mapping.h> | ||
10 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/dmapool.h> | ||
11 | #include <linux/memory.h> | 13 | #include <linux/memory.h> |
12 | #include <linux/gfp.h> | 14 | #include <linux/gfp.h> |
13 | #include <linux/dmapool.h> | ||
14 | #include <mach/coh901318.h> | 15 | #include <mach/coh901318.h> |
15 | 16 | ||
16 | #include "coh901318_lli.h" | 17 | #include "coh901318_lli.h" |
@@ -176,18 +177,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, | |||
176 | struct coh901318_lli *lli, | 177 | struct coh901318_lli *lli, |
177 | dma_addr_t buf, unsigned int size, | 178 | dma_addr_t buf, unsigned int size, |
178 | dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom, | 179 | dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom, |
179 | enum dma_transfer_direction dir) | 180 | enum dma_data_direction dir) |
180 | { | 181 | { |
181 | int s = size; | 182 | int s = size; |
182 | dma_addr_t src; | 183 | dma_addr_t src; |
183 | dma_addr_t dst; | 184 | dma_addr_t dst; |
184 | 185 | ||
185 | 186 | ||
186 | if (dir == DMA_MEM_TO_DEV) { | 187 | if (dir == DMA_TO_DEVICE) { |
187 | src = buf; | 188 | src = buf; |
188 | dst = dev_addr; | 189 | dst = dev_addr; |
189 | 190 | ||
190 | } else if (dir == DMA_DEV_TO_MEM) { | 191 | } else if (dir == DMA_FROM_DEVICE) { |
191 | 192 | ||
192 | src = dev_addr; | 193 | src = dev_addr; |
193 | dst = buf; | 194 | dst = buf; |
@@ -214,9 +215,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, | |||
214 | 215 | ||
215 | lli = coh901318_lli_next(lli); | 216 | lli = coh901318_lli_next(lli); |
216 | 217 | ||
217 | if (dir == DMA_MEM_TO_DEV) | 218 | if (dir == DMA_TO_DEVICE) |
218 | src += block_size; | 219 | src += block_size; |
219 | else if (dir == DMA_DEV_TO_MEM) | 220 | else if (dir == DMA_FROM_DEVICE) |
220 | dst += block_size; | 221 | dst += block_size; |
221 | } | 222 | } |
222 | 223 | ||
@@ -233,7 +234,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, | |||
233 | struct scatterlist *sgl, unsigned int nents, | 234 | struct scatterlist *sgl, unsigned int nents, |
234 | dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, | 235 | dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, |
235 | u32 ctrl_last, | 236 | u32 ctrl_last, |
236 | enum dma_transfer_direction dir, u32 ctrl_irq_mask) | 237 | enum dma_data_direction dir, u32 ctrl_irq_mask) |
237 | { | 238 | { |
238 | int i; | 239 | int i; |
239 | struct scatterlist *sg; | 240 | struct scatterlist *sg; |
@@ -248,9 +249,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, | |||
248 | 249 | ||
249 | spin_lock(&pool->lock); | 250 | spin_lock(&pool->lock); |
250 | 251 | ||
251 | if (dir == DMA_MEM_TO_DEV) | 252 | if (dir == DMA_TO_DEVICE) |
252 | dst = dev_addr; | 253 | dst = dev_addr; |
253 | else if (dir == DMA_DEV_TO_MEM) | 254 | else if (dir == DMA_FROM_DEVICE) |
254 | src = dev_addr; | 255 | src = dev_addr; |
255 | else | 256 | else |
256 | goto err; | 257 | goto err; |
@@ -268,12 +269,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, | |||
268 | ctrl_sg = ctrl ? ctrl : ctrl_last; | 269 | ctrl_sg = ctrl ? ctrl : ctrl_last; |
269 | 270 | ||
270 | 271 | ||
271 | if (dir == DMA_MEM_TO_DEV) | 272 | if (dir == DMA_TO_DEVICE) |
272 | /* increment source address */ | 273 | /* increment source address */ |
273 | src = sg_dma_address(sg); | 274 | src = sg_phys(sg); |
274 | else | 275 | else |
275 | /* increment destination address */ | 276 | /* increment destination address */ |
276 | dst = sg_dma_address(sg); | 277 | dst = sg_phys(sg); |
277 | 278 | ||
278 | bytes_to_transfer = sg_dma_len(sg); | 279 | bytes_to_transfer = sg_dma_len(sg); |
279 | 280 | ||
@@ -292,7 +293,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, | |||
292 | lli->src_addr = src; | 293 | lli->src_addr = src; |
293 | lli->dst_addr = dst; | 294 | lli->dst_addr = dst; |
294 | 295 | ||
295 | if (dir == DMA_DEV_TO_MEM) | 296 | if (dir == DMA_FROM_DEVICE) |
296 | dst += elem_size; | 297 | dst += elem_size; |
297 | else | 298 | else |
298 | src += elem_size; | 299 | src += elem_size; |
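The address change in the scatter-gather hunk is worth flagging: sg_dma_address() returns the bus address established by dma_map_sg(), while sg_phys() returns the CPU physical address, and the two only coincide when the mapping is 1:1 (no IOMMU or bounce buffering involved). A generic sketch of the mapped-list walk a caller of this code is expected to have performed; walk_mapped_sg and its arguments are illustrative, not from this file:

	static void walk_mapped_sg(struct device *dev, struct scatterlist *sgl,
				   int sg_len)
	{
		struct scatterlist *sg;
		int i, nents;

		nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
		for_each_sg(sgl, sg, nents, i)
			pr_debug("lli: addr 0x%llx len %u\n",
				 (unsigned long long)sg_dma_address(sg),
				 sg_dma_len(sg));
		dma_unmap_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
	}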
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h index abff3714fdd..7a5c80990e9 100644 --- a/drivers/dma/coh901318_lli.h +++ b/drivers/dma/coh901318_lli.h | |||
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, | |||
97 | struct coh901318_lli *lli, | 97 | struct coh901318_lli *lli, |
98 | dma_addr_t buf, unsigned int size, | 98 | dma_addr_t buf, unsigned int size, |
99 | dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last, | 99 | dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last, |
100 | enum dma_transfer_direction dir); | 100 | enum dma_data_direction dir); |
101 | 101 | ||
102 | /** | 102 | /** |
103 | * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer | 103 | * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer |
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, | |||
119 | struct scatterlist *sg, unsigned int nents, | 119 | struct scatterlist *sg, unsigned int nents, |
120 | dma_addr_t dev_addr, u32 ctrl_chained, | 120 | dma_addr_t dev_addr, u32 ctrl_chained, |
121 | u32 ctrl, u32 ctrl_last, | 121 | u32 ctrl, u32 ctrl_last, |
122 | enum dma_transfer_direction dir, u32 ctrl_irq_mask); | 122 | enum dma_data_direction dir, u32 ctrl_irq_mask); |
123 | 123 | ||
124 | #endif /* COH901318_LLI_H */ | 124 | #endif /* COH901318_LLI_H */ |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a815d44c70a..b48967b499d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -45,8 +45,6 @@ | |||
45 | * See Documentation/dmaengine.txt for more details | 45 | * See Documentation/dmaengine.txt for more details |
46 | */ | 46 | */ |
47 | 47 | ||
48 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
49 | |||
50 | #include <linux/dma-mapping.h> | 48 | #include <linux/dma-mapping.h> |
51 | #include <linux/init.h> | 49 | #include <linux/init.h> |
52 | #include <linux/module.h> | 50 | #include <linux/module.h> |
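Dropping the pr_fmt() define is what forces the string changes in the hunks below: without it, pr_err()/pr_debug() calls in this file no longer get an automatic module-name prefix, so the "dmaengine: " text has to be spelled out by hand again. Roughly:

	/* With the define in place, a call such as */
	pr_err("%s: timeout!\n", __func__);
	/* prints "dmaengine: dma_sync_wait: timeout!"; without it the prefix
	 * must be embedded in each format string, as the right-hand column does. */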
@@ -263,7 +261,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
263 | do { | 261 | do { |
264 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 262 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
265 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 263 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
266 | pr_err("%s: timeout!\n", __func__); | 264 | printk(KERN_ERR "dma_sync_wait_timeout!\n"); |
267 | return DMA_ERROR; | 265 | return DMA_ERROR; |
268 | } | 266 | } |
269 | } while (status == DMA_IN_PROGRESS); | 267 | } while (status == DMA_IN_PROGRESS); |
@@ -314,7 +312,7 @@ static int __init dma_channel_table_init(void) | |||
314 | } | 312 | } |
315 | 313 | ||
316 | if (err) { | 314 | if (err) { |
317 | pr_err("initialization failure\n"); | 315 | pr_err("dmaengine: initialization failure\n"); |
318 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | 316 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
319 | if (channel_table[cap]) | 317 | if (channel_table[cap]) |
320 | free_percpu(channel_table[cap]); | 318 | free_percpu(channel_table[cap]); |
@@ -334,20 +332,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | |||
334 | } | 332 | } |
335 | EXPORT_SYMBOL(dma_find_channel); | 333 | EXPORT_SYMBOL(dma_find_channel); |
336 | 334 | ||
337 | /* | ||
338 | * net_dma_find_channel - find a channel for net_dma | ||
339 | * net_dma has alignment requirements | ||
340 | */ | ||
341 | struct dma_chan *net_dma_find_channel(void) | ||
342 | { | ||
343 | struct dma_chan *chan = dma_find_channel(DMA_MEMCPY); | ||
344 | if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1)) | ||
345 | return NULL; | ||
346 | |||
347 | return chan; | ||
348 | } | ||
349 | EXPORT_SYMBOL(net_dma_find_channel); | ||
350 | |||
351 | /** | 335 | /** |
352 | * dma_issue_pending_all - flush all pending operations across all channels | 336 | * dma_issue_pending_all - flush all pending operations across all channels |
353 | */ | 337 | */ |
@@ -522,12 +506,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
522 | err = dma_chan_get(chan); | 506 | err = dma_chan_get(chan); |
523 | 507 | ||
524 | if (err == -ENODEV) { | 508 | if (err == -ENODEV) { |
525 | pr_debug("%s: %s module removed\n", | 509 | pr_debug("%s: %s module removed\n", __func__, |
526 | __func__, dma_chan_name(chan)); | 510 | dma_chan_name(chan)); |
527 | list_del_rcu(&device->global_node); | 511 | list_del_rcu(&device->global_node); |
528 | } else if (err) | 512 | } else if (err) |
529 | pr_debug("%s: failed to get %s: (%d)\n", | 513 | pr_debug("dmaengine: failed to get %s: (%d)\n", |
530 | __func__, dma_chan_name(chan), err); | 514 | dma_chan_name(chan), err); |
531 | else | 515 | else |
532 | break; | 516 | break; |
533 | if (--device->privatecnt == 0) | 517 | if (--device->privatecnt == 0) |
@@ -537,9 +521,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
537 | } | 521 | } |
538 | mutex_unlock(&dma_list_mutex); | 522 | mutex_unlock(&dma_list_mutex); |
539 | 523 | ||
540 | pr_debug("%s: %s (%s)\n", | 524 | pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", |
541 | __func__, | ||
542 | chan ? "success" : "fail", | ||
543 | chan ? dma_chan_name(chan) : NULL); | 525 | chan ? dma_chan_name(chan) : NULL); |
544 | 526 | ||
545 | return chan; | 527 | return chan; |
@@ -582,8 +564,8 @@ void dmaengine_get(void) | |||
582 | list_del_rcu(&device->global_node); | 564 | list_del_rcu(&device->global_node); |
583 | break; | 565 | break; |
584 | } else if (err) | 566 | } else if (err) |
585 | pr_debug("%s: failed to get %s: (%d)\n", | 567 | pr_err("dmaengine: failed to get %s: (%d)\n", |
586 | __func__, dma_chan_name(chan), err); | 568 | dma_chan_name(chan), err); |
587 | } | 569 | } |
588 | } | 570 | } |
589 | 571 | ||
@@ -711,12 +693,12 @@ int dma_async_device_register(struct dma_device *device) | |||
711 | !device->device_prep_dma_interrupt); | 693 | !device->device_prep_dma_interrupt); |
712 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && | 694 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && |
713 | !device->device_prep_dma_sg); | 695 | !device->device_prep_dma_sg); |
696 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | ||
697 | !device->device_prep_slave_sg); | ||
714 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && | 698 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && |
715 | !device->device_prep_dma_cyclic); | 699 | !device->device_prep_dma_cyclic); |
716 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | 700 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && |
717 | !device->device_control); | 701 | !device->device_control); |
718 | BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && | ||
719 | !device->device_prep_interleaved_dma); | ||
720 | 702 | ||
721 | BUG_ON(!device->device_alloc_chan_resources); | 703 | BUG_ON(!device->device_alloc_chan_resources); |
722 | BUG_ON(!device->device_free_chan_resources); | 704 | BUG_ON(!device->device_free_chan_resources); |
@@ -1019,7 +1001,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
1019 | while (tx->cookie == -EBUSY) { | 1001 | while (tx->cookie == -EBUSY) { |
1020 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 1002 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
1021 | pr_err("%s timeout waiting for descriptor submission\n", | 1003 | pr_err("%s timeout waiting for descriptor submission\n", |
1022 | __func__); | 1004 | __func__); |
1023 | return DMA_ERROR; | 1005 | return DMA_ERROR; |
1024 | } | 1006 | } |
1025 | cpu_relax(); | 1007 | cpu_relax(); |
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h deleted file mode 100644 index 17f983a4e9b..00000000000 --- a/drivers/dma/dmaengine.h +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | /* | ||
2 | * The contents of this file are private to DMA engine drivers, and is not | ||
3 | * part of the API to be used by DMA engine users. | ||
4 | */ | ||
5 | #ifndef DMAENGINE_H | ||
6 | #define DMAENGINE_H | ||
7 | |||
8 | #include <linux/bug.h> | ||
9 | #include <linux/dmaengine.h> | ||
10 | |||
11 | /** | ||
12 | * dma_cookie_init - initialize the cookies for a DMA channel | ||
13 | * @chan: dma channel to initialize | ||
14 | */ | ||
15 | static inline void dma_cookie_init(struct dma_chan *chan) | ||
16 | { | ||
17 | chan->cookie = DMA_MIN_COOKIE; | ||
18 | chan->completed_cookie = DMA_MIN_COOKIE; | ||
19 | } | ||
20 | |||
21 | /** | ||
22 | * dma_cookie_assign - assign a DMA engine cookie to the descriptor | ||
23 | * @tx: descriptor needing cookie | ||
24 | * | ||
25 | * Assign a unique non-zero per-channel cookie to the descriptor. | ||
26 | * Note: caller is expected to hold a lock to prevent concurrency. | ||
27 | */ | ||
28 | static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) | ||
29 | { | ||
30 | struct dma_chan *chan = tx->chan; | ||
31 | dma_cookie_t cookie; | ||
32 | |||
33 | cookie = chan->cookie + 1; | ||
34 | if (cookie < DMA_MIN_COOKIE) | ||
35 | cookie = DMA_MIN_COOKIE; | ||
36 | tx->cookie = chan->cookie = cookie; | ||
37 | |||
38 | return cookie; | ||
39 | } | ||
40 | |||
41 | /** | ||
42 | * dma_cookie_complete - complete a descriptor | ||
43 | * @tx: descriptor to complete | ||
44 | * | ||
45 | * Mark this descriptor complete by updating the channels completed | ||
46 | * cookie marker. Zero the descriptors cookie to prevent accidental | ||
47 | * repeated completions. | ||
48 | * | ||
49 | * Note: caller is expected to hold a lock to prevent concurrency. | ||
50 | */ | ||
51 | static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx) | ||
52 | { | ||
53 | BUG_ON(tx->cookie < DMA_MIN_COOKIE); | ||
54 | tx->chan->completed_cookie = tx->cookie; | ||
55 | tx->cookie = 0; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * dma_cookie_status - report cookie status | ||
60 | * @chan: dma channel | ||
61 | * @cookie: cookie we are interested in | ||
62 | * @state: dma_tx_state structure to return last/used cookies | ||
63 | * | ||
64 | * Report the status of the cookie, filling in the state structure if | ||
65 | * non-NULL. No locking is required. | ||
66 | */ | ||
67 | static inline enum dma_status dma_cookie_status(struct dma_chan *chan, | ||
68 | dma_cookie_t cookie, struct dma_tx_state *state) | ||
69 | { | ||
70 | dma_cookie_t used, complete; | ||
71 | |||
72 | used = chan->cookie; | ||
73 | complete = chan->completed_cookie; | ||
74 | barrier(); | ||
75 | if (state) { | ||
76 | state->last = complete; | ||
77 | state->used = used; | ||
78 | state->residue = 0; | ||
79 | } | ||
80 | return dma_async_is_complete(cookie, complete, used); | ||
81 | } | ||
82 | |||
83 | static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) | ||
84 | { | ||
85 | if (state) | ||
86 | state->residue = residue; | ||
87 | } | ||
88 | |||
89 | #endif | ||
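The deleted drivers/dma/dmaengine.h had centralized the cookie bookkeeping that each driver in this older tree carries privately (see the coh901318 hunks above and the dw_dmac hunks below). For comparison, the rough shape of a driver built on those helpers; foo_tx_submit, foo_tx_status and foo_bytes_left are hypothetical names:

	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		/* caller holds the channel lock */
		return dma_cookie_assign(tx);
	}

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

		if (ret != DMA_SUCCESS)
			dma_set_residue(txstate, foo_bytes_left(chan));
		return ret;
	}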
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 64b048d7fba..765f5ff2230 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <linux/dma-mapping.h> | 11 | #include <linux/dma-mapping.h> |
12 | #include <linux/dmaengine.h> | 12 | #include <linux/dmaengine.h> |
13 | #include <linux/freezer.h> | ||
14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
15 | #include <linux/kthread.h> | 14 | #include <linux/kthread.h> |
16 | #include <linux/module.h> | 15 | #include <linux/module.h> |
@@ -214,32 +213,9 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | |||
214 | return error_count; | 213 | return error_count; |
215 | } | 214 | } |
216 | 215 | ||
217 | /* poor man's completion - we want to use wait_event_freezable() on it */ | 216 | static void dmatest_callback(void *completion) |
218 | struct dmatest_done { | ||
219 | bool done; | ||
220 | wait_queue_head_t *wait; | ||
221 | }; | ||
222 | |||
223 | static void dmatest_callback(void *arg) | ||
224 | { | ||
225 | struct dmatest_done *done = arg; | ||
226 | |||
227 | done->done = true; | ||
228 | wake_up_all(done->wait); | ||
229 | } | ||
230 | |||
231 | static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, | ||
232 | unsigned int count) | ||
233 | { | ||
234 | while (count--) | ||
235 | dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); | ||
236 | } | ||
237 | |||
238 | static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, | ||
239 | unsigned int count) | ||
240 | { | 217 | { |
241 | while (count--) | 218 | complete(completion); |
242 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); | ||
243 | } | 219 | } |
244 | 220 | ||
245 | /* | 221 | /* |
@@ -258,9 +234,7 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, | |||
258 | */ | 234 | */ |
259 | static int dmatest_func(void *data) | 235 | static int dmatest_func(void *data) |
260 | { | 236 | { |
261 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); | ||
262 | struct dmatest_thread *thread = data; | 237 | struct dmatest_thread *thread = data; |
263 | struct dmatest_done done = { .wait = &done_wait }; | ||
264 | struct dma_chan *chan; | 238 | struct dma_chan *chan; |
265 | const char *thread_name; | 239 | const char *thread_name; |
266 | unsigned int src_off, dst_off, len; | 240 | unsigned int src_off, dst_off, len; |
@@ -277,7 +251,6 @@ static int dmatest_func(void *data) | |||
277 | int i; | 251 | int i; |
278 | 252 | ||
279 | thread_name = current->comm; | 253 | thread_name = current->comm; |
280 | set_freezable(); | ||
281 | 254 | ||
282 | ret = -ENOMEM; | 255 | ret = -ENOMEM; |
283 | 256 | ||
@@ -331,6 +304,8 @@ static int dmatest_func(void *data) | |||
331 | struct dma_async_tx_descriptor *tx = NULL; | 304 | struct dma_async_tx_descriptor *tx = NULL; |
332 | dma_addr_t dma_srcs[src_cnt]; | 305 | dma_addr_t dma_srcs[src_cnt]; |
333 | dma_addr_t dma_dsts[dst_cnt]; | 306 | dma_addr_t dma_dsts[dst_cnt]; |
307 | struct completion cmp; | ||
308 | unsigned long tmo = msecs_to_jiffies(timeout); | ||
334 | u8 align = 0; | 309 | u8 align = 0; |
335 | 310 | ||
336 | total_tests++; | 311 | total_tests++; |
@@ -367,35 +342,15 @@ static int dmatest_func(void *data) | |||
367 | 342 | ||
368 | dma_srcs[i] = dma_map_single(dev->dev, buf, len, | 343 | dma_srcs[i] = dma_map_single(dev->dev, buf, len, |
369 | DMA_TO_DEVICE); | 344 | DMA_TO_DEVICE); |
370 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); | ||
371 | if (ret) { | ||
372 | unmap_src(dev->dev, dma_srcs, len, i); | ||
373 | pr_warn("%s: #%u: mapping error %d with " | ||
374 | "src_off=0x%x len=0x%x\n", | ||
375 | thread_name, total_tests - 1, ret, | ||
376 | src_off, len); | ||
377 | failed_tests++; | ||
378 | continue; | ||
379 | } | ||
380 | } | 345 | } |
381 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 346 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
382 | for (i = 0; i < dst_cnt; i++) { | 347 | for (i = 0; i < dst_cnt; i++) { |
383 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], | 348 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], |
384 | test_buf_size, | 349 | test_buf_size, |
385 | DMA_BIDIRECTIONAL); | 350 | DMA_BIDIRECTIONAL); |
386 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); | ||
387 | if (ret) { | ||
388 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | ||
389 | unmap_dst(dev->dev, dma_dsts, test_buf_size, i); | ||
390 | pr_warn("%s: #%u: mapping error %d with " | ||
391 | "dst_off=0x%x len=0x%x\n", | ||
392 | thread_name, total_tests - 1, ret, | ||
393 | dst_off, test_buf_size); | ||
394 | failed_tests++; | ||
395 | continue; | ||
396 | } | ||
397 | } | 351 | } |
398 | 352 | ||
353 | |||
399 | if (thread->type == DMA_MEMCPY) | 354 | if (thread->type == DMA_MEMCPY) |
400 | tx = dev->device_prep_dma_memcpy(chan, | 355 | tx = dev->device_prep_dma_memcpy(chan, |
401 | dma_dsts[0] + dst_off, | 356 | dma_dsts[0] + dst_off, |
@@ -417,8 +372,13 @@ static int dmatest_func(void *data) | |||
417 | } | 372 | } |
418 | 373 | ||
419 | if (!tx) { | 374 | if (!tx) { |
420 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 375 | for (i = 0; i < src_cnt; i++) |
421 | unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); | 376 | dma_unmap_single(dev->dev, dma_srcs[i], len, |
377 | DMA_TO_DEVICE); | ||
378 | for (i = 0; i < dst_cnt; i++) | ||
379 | dma_unmap_single(dev->dev, dma_dsts[i], | ||
380 | test_buf_size, | ||
381 | DMA_BIDIRECTIONAL); | ||
422 | pr_warning("%s: #%u: prep error with src_off=0x%x " | 382 | pr_warning("%s: #%u: prep error with src_off=0x%x " |
423 | "dst_off=0x%x len=0x%x\n", | 383 | "dst_off=0x%x len=0x%x\n", |
424 | thread_name, total_tests - 1, | 384 | thread_name, total_tests - 1, |
@@ -428,9 +388,9 @@ static int dmatest_func(void *data) | |||
428 | continue; | 388 | continue; |
429 | } | 389 | } |
430 | 390 | ||
431 | done.done = false; | 391 | init_completion(&cmp); |
432 | tx->callback = dmatest_callback; | 392 | tx->callback = dmatest_callback; |
433 | tx->callback_param = &done; | 393 | tx->callback_param = &cmp; |
434 | cookie = tx->tx_submit(tx); | 394 | cookie = tx->tx_submit(tx); |
435 | 395 | ||
436 | if (dma_submit_error(cookie)) { | 396 | if (dma_submit_error(cookie)) { |
@@ -444,20 +404,10 @@ static int dmatest_func(void *data) | |||
444 | } | 404 | } |
445 | dma_async_issue_pending(chan); | 405 | dma_async_issue_pending(chan); |
446 | 406 | ||
447 | wait_event_freezable_timeout(done_wait, done.done, | 407 | tmo = wait_for_completion_timeout(&cmp, tmo); |
448 | msecs_to_jiffies(timeout)); | ||
449 | |||
450 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 408 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
451 | 409 | ||
452 | if (!done.done) { | 410 | if (tmo == 0) { |
453 | /* | ||
454 | * We're leaving the timed out dma operation with | ||
455 | * dangling pointer to done_wait. To make this | ||
456 | * correct, we'll need to allocate wait_done for | ||
457 | * each test iteration and perform "who's gonna | ||
458 | * free it this time?" dancing. For now, just | ||
459 | * leave it dangling. | ||
460 | */ | ||
461 | pr_warning("%s: #%u: test timed out\n", | 411 | pr_warning("%s: #%u: test timed out\n", |
462 | thread_name, total_tests - 1); | 412 | thread_name, total_tests - 1); |
463 | failed_tests++; | 413 | failed_tests++; |
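These dmatest hunks revert the freezable wait-queue scheme back to a plain on-stack struct completion; the same caveat the removed comment describes (a late callback dereferencing a stale stack pointer after a timeout) arguably applies to the completion as well. Condensed from the restored code paths above, the essential flow is:

	struct completion cmp;
	unsigned long tmo = msecs_to_jiffies(timeout);

	init_completion(&cmp);
	tx->callback = dmatest_callback;	/* just calls complete(&cmp) */
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);

	tmo = wait_for_completion_timeout(&cmp, tmo);
	if (tmo == 0)
		failed_tests++;			/* timed out */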
@@ -472,7 +422,9 @@ static int dmatest_func(void *data) | |||
472 | } | 422 | } |
473 | 423 | ||
474 | /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ | 424 | /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ |
475 | unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); | 425 | for (i = 0; i < dst_cnt; i++) |
426 | dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, | ||
427 | DMA_BIDIRECTIONAL); | ||
476 | 428 | ||
477 | error_count = 0; | 429 | error_count = 0; |
478 | 430 | ||
@@ -525,8 +477,6 @@ err_srcs: | |||
525 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", | 477 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", |
526 | thread_name, total_tests, failed_tests, ret); | 478 | thread_name, total_tests, failed_tests, ret); |
527 | 479 | ||
528 | /* terminate all transfers on specified channels */ | ||
529 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | ||
530 | if (iterations > 0) | 480 | if (iterations > 0) |
531 | while (!kthread_should_stop()) { | 481 | while (!kthread_should_stop()) { |
532 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); | 482 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); |
@@ -549,10 +499,6 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
549 | list_del(&thread->node); | 499 | list_del(&thread->node); |
550 | kfree(thread); | 500 | kfree(thread); |
551 | } | 501 | } |
552 | |||
553 | /* terminate all transfers on specified channels */ | ||
554 | dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); | ||
555 | |||
556 | kfree(dtc); | 502 | kfree(dtc); |
557 | } | 503 | } |
558 | 504 | ||
@@ -626,7 +572,7 @@ static int dmatest_add_channel(struct dma_chan *chan) | |||
626 | } | 572 | } |
627 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | 573 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { |
628 | cnt = dmatest_add_threads(dtc, DMA_PQ); | 574 | cnt = dmatest_add_threads(dtc, DMA_PQ); |
629 | thread_count += cnt > 0 ? cnt : 0; | 575 | thread_count += cnt > 0 ?: 0; |
630 | } | 576 | } |
631 | 577 | ||
632 | pr_info("dmatest: Started %u threads using %s\n", | 578 | pr_info("dmatest: Started %u threads using %s\n", |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 3e8ba02ba29..4d180ca9a1d 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -9,7 +9,6 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | #include <linux/bitops.h> | ||
13 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
14 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
15 | #include <linux/dmaengine.h> | 14 | #include <linux/dmaengine.h> |
@@ -17,14 +16,12 @@ | |||
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
19 | #include <linux/io.h> | 18 | #include <linux/io.h> |
20 | #include <linux/of.h> | ||
21 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
22 | #include <linux/module.h> | 20 | #include <linux/module.h> |
23 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
24 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
25 | 23 | ||
26 | #include "dw_dmac_regs.h" | 24 | #include "dw_dmac_regs.h" |
27 | #include "dmaengine.h" | ||
28 | 25 | ||
29 | /* | 26 | /* |
30 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | 27 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", |
@@ -36,36 +33,32 @@ | |||
36 | * which does not support descriptor writeback. | 33 | * which does not support descriptor writeback. |
37 | */ | 34 | */ |
38 | 35 | ||
39 | static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) | 36 | #define DWC_DEFAULT_CTLLO(private) ({ \ |
40 | { | 37 | struct dw_dma_slave *__slave = (private); \ |
41 | return slave ? slave->dst_master : 0; | 38 | int dms = __slave ? __slave->dst_master : 0; \ |
42 | } | 39 | int sms = __slave ? __slave->src_master : 1; \ |
43 | 40 | u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ | |
44 | static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | 41 | u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ |
45 | { | ||
46 | return slave ? slave->src_master : 1; | ||
47 | } | ||
48 | |||
49 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | ||
50 | struct dw_dma_slave *__slave = (_chan->private); \ | ||
51 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | ||
52 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | ||
53 | int _dms = dwc_get_dms(__slave); \ | ||
54 | int _sms = dwc_get_sms(__slave); \ | ||
55 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ | ||
56 | DW_DMA_MSIZE_16; \ | ||
57 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ | ||
58 | DW_DMA_MSIZE_16; \ | ||
59 | \ | 42 | \ |
60 | (DWC_CTLL_DST_MSIZE(_dmsize) \ | 43 | (DWC_CTLL_DST_MSIZE(dmsize) \ |
61 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | 44 | | DWC_CTLL_SRC_MSIZE(smsize) \ |
62 | | DWC_CTLL_LLP_D_EN \ | 45 | | DWC_CTLL_LLP_D_EN \ |
63 | | DWC_CTLL_LLP_S_EN \ | 46 | | DWC_CTLL_LLP_S_EN \ |
64 | | DWC_CTLL_DMS(_dms) \ | 47 | | DWC_CTLL_DMS(dms) \ |
65 | | DWC_CTLL_SMS(_sms)); \ | 48 | | DWC_CTLL_SMS(sms)); \ |
66 | }) | 49 | }) |
67 | 50 | ||
68 | /* | 51 | /* |
52 | * This is configuration-dependent and usually a funny size like 4095. | ||
53 | * | ||
54 | * Note that this is a transfer count, i.e. if we transfer 32-bit | ||
55 | * words, we can do 16380 bytes per descriptor. | ||
56 | * | ||
57 | * This parameter is also system-specific. | ||
58 | */ | ||
59 | #define DWC_MAX_COUNT 4095U | ||
60 | |||
61 | /* | ||
69 | * Number of descriptors to allocate for each channel. This should be | 62 | * Number of descriptors to allocate for each channel. This should be |
70 | * made configurable somehow; preferably, the clients (at least the | 63 | * made configurable somehow; preferably, the clients (at least the |
71 | * ones using slave transfers) should be able to give us a hint. | 64 | * ones using slave transfers) should be able to give us a hint. |
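Two things come back with this hunk: DWC_DEFAULT_CTLLO() again derives burst sizes and the source/destination masters from chan->private (a struct dw_dma_slave) instead of the runtime dma_slave_config, and DWC_MAX_COUNT reinstates the per-descriptor transfer-count ceiling. A sketch of how such a ceiling is typically applied when slicing a long transfer into descriptors, assuming a 32-bit source width; this is an illustration, not a quote from the prep routines:

	unsigned int src_width = 2;		/* items are 1 << 2 = 4 bytes */
	size_t offset, xfer_count;		/* len = total size in bytes (assumed) */

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				   DWC_MAX_COUNT);
		/* one descriptor/LLI moves xfer_count items of 4 bytes */
	}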
@@ -105,13 +98,13 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
105 | 98 | ||
106 | spin_lock_irqsave(&dwc->lock, flags); | 99 | spin_lock_irqsave(&dwc->lock, flags); |
107 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 100 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
108 | i++; | ||
109 | if (async_tx_test_ack(&desc->txd)) { | 101 | if (async_tx_test_ack(&desc->txd)) { |
110 | list_del(&desc->desc_node); | 102 | list_del(&desc->desc_node); |
111 | ret = desc; | 103 | ret = desc; |
112 | break; | 104 | break; |
113 | } | 105 | } |
114 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 106 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
107 | i++; | ||
115 | } | 108 | } |
116 | spin_unlock_irqrestore(&dwc->lock, flags); | 109 | spin_unlock_irqrestore(&dwc->lock, flags); |
117 | 110 | ||
@@ -158,136 +151,44 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
158 | } | 151 | } |
159 | } | 152 | } |
160 | 153 | ||
161 | static void dwc_initialize(struct dw_dma_chan *dwc) | 154 | /* Called with dwc->lock held and bh disabled */ |
162 | { | 155 | static dma_cookie_t |
163 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 156 | dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) |
164 | struct dw_dma_slave *dws = dwc->chan.private; | ||
165 | u32 cfghi = DWC_CFGH_FIFO_MODE; | ||
166 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | ||
167 | |||
168 | if (dwc->initialized == true) | ||
169 | return; | ||
170 | |||
171 | if (dws) { | ||
172 | /* | ||
173 | * We need controller-specific data to set up slave | ||
174 | * transfers. | ||
175 | */ | ||
176 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | ||
177 | |||
178 | cfghi = dws->cfg_hi; | ||
179 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | ||
180 | } else { | ||
181 | if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) | ||
182 | cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); | ||
183 | else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) | ||
184 | cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); | ||
185 | } | ||
186 | |||
187 | channel_writel(dwc, CFG_LO, cfglo); | ||
188 | channel_writel(dwc, CFG_HI, cfghi); | ||
189 | |||
190 | /* Enable interrupts */ | ||
191 | channel_set_bit(dw, MASK.XFER, dwc->mask); | ||
192 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | ||
193 | |||
194 | dwc->initialized = true; | ||
195 | } | ||
196 | |||
197 | /*----------------------------------------------------------------------*/ | ||
198 | |||
199 | static inline unsigned int dwc_fast_fls(unsigned long long v) | ||
200 | { | 157 | { |
201 | /* | 158 | dma_cookie_t cookie = dwc->chan.cookie; |
202 | * We can be a lot more clever here, but this should take care | ||
203 | * of the most common optimization. | ||
204 | */ | ||
205 | if (!(v & 7)) | ||
206 | return 3; | ||
207 | else if (!(v & 3)) | ||
208 | return 2; | ||
209 | else if (!(v & 1)) | ||
210 | return 1; | ||
211 | return 0; | ||
212 | } | ||
213 | 159 | ||
214 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) | 160 | if (++cookie < 0) |
215 | { | 161 | cookie = 1; |
216 | dev_err(chan2dev(&dwc->chan), | ||
217 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
218 | channel_readl(dwc, SAR), | ||
219 | channel_readl(dwc, DAR), | ||
220 | channel_readl(dwc, LLP), | ||
221 | channel_readl(dwc, CTL_HI), | ||
222 | channel_readl(dwc, CTL_LO)); | ||
223 | } | ||
224 | 162 | ||
163 | dwc->chan.cookie = cookie; | ||
164 | desc->txd.cookie = cookie; | ||
225 | 165 | ||
226 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | 166 | return cookie; |
227 | { | ||
228 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
229 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
230 | cpu_relax(); | ||
231 | } | 167 | } |
232 | 168 | ||
233 | /*----------------------------------------------------------------------*/ | 169 | /*----------------------------------------------------------------------*/ |
234 | 170 | ||
235 | /* Perform single block transfer */ | ||
236 | static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | ||
237 | struct dw_desc *desc) | ||
238 | { | ||
239 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
240 | u32 ctllo; | ||
241 | |||
242 | /* Software emulation of LLP mode relies on interrupts to continue | ||
243 | * multi block transfer. */ | ||
244 | ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; | ||
245 | |||
246 | channel_writel(dwc, SAR, desc->lli.sar); | ||
247 | channel_writel(dwc, DAR, desc->lli.dar); | ||
248 | channel_writel(dwc, CTL_LO, ctllo); | ||
249 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); | ||
250 | channel_set_bit(dw, CH_EN, dwc->mask); | ||
251 | } | ||
252 | |||
253 | /* Called with dwc->lock held and bh disabled */ | 171 | /* Called with dwc->lock held and bh disabled */ |
254 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 172 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
255 | { | 173 | { |
256 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 174 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
257 | unsigned long was_soft_llp; | ||
258 | 175 | ||
259 | /* ASSERT: channel is idle */ | 176 | /* ASSERT: channel is idle */ |
260 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 177 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
261 | dev_err(chan2dev(&dwc->chan), | 178 | dev_err(chan2dev(&dwc->chan), |
262 | "BUG: Attempted to start non-idle channel\n"); | 179 | "BUG: Attempted to start non-idle channel\n"); |
263 | dwc_dump_chan_regs(dwc); | 180 | dev_err(chan2dev(&dwc->chan), |
181 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
182 | channel_readl(dwc, SAR), | ||
183 | channel_readl(dwc, DAR), | ||
184 | channel_readl(dwc, LLP), | ||
185 | channel_readl(dwc, CTL_HI), | ||
186 | channel_readl(dwc, CTL_LO)); | ||
264 | 187 | ||
265 | /* The tasklet will hopefully advance the queue... */ | 188 | /* The tasklet will hopefully advance the queue... */ |
266 | return; | 189 | return; |
267 | } | 190 | } |
268 | 191 | ||
269 | if (dwc->nollp) { | ||
270 | was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, | ||
271 | &dwc->flags); | ||
272 | if (was_soft_llp) { | ||
273 | dev_err(chan2dev(&dwc->chan), | ||
274 | "BUG: Attempted to start new LLP transfer " | ||
275 | "inside ongoing one\n"); | ||
276 | return; | ||
277 | } | ||
278 | |||
279 | dwc_initialize(dwc); | ||
280 | |||
281 | dwc->tx_list = &first->tx_list; | ||
282 | dwc->tx_node_active = first->tx_list.next; | ||
283 | |||
284 | dwc_do_single_block(dwc, first); | ||
285 | |||
286 | return; | ||
287 | } | ||
288 | |||
289 | dwc_initialize(dwc); | ||
290 | |||
291 | channel_writel(dwc, LLP, first->txd.phys); | 192 | channel_writel(dwc, LLP, first->txd.phys); |
292 | channel_writel(dwc, CTL_LO, | 193 | channel_writel(dwc, CTL_LO, |
293 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | 194 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
@@ -310,7 +211,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
310 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); | 211 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
311 | 212 | ||
312 | spin_lock_irqsave(&dwc->lock, flags); | 213 | spin_lock_irqsave(&dwc->lock, flags); |
313 | dma_cookie_complete(txd); | 214 | dwc->completed = txd->cookie; |
314 | if (callback_required) { | 215 | if (callback_required) { |
315 | callback = txd->callback; | 216 | callback = txd->callback; |
316 | param = txd->callback_param; | 217 | param = txd->callback_param; |
@@ -364,7 +265,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
364 | "BUG: XFER bit set, but channel not idle!\n"); | 265 | "BUG: XFER bit set, but channel not idle!\n"); |
365 | 266 | ||
366 | /* Try to continue after resetting the channel... */ | 267 | /* Try to continue after resetting the channel... */ |
367 | dwc_chan_disable(dw, dwc); | 268 | channel_clear_bit(dw, CH_EN, dwc->mask); |
269 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
270 | cpu_relax(); | ||
368 | } | 271 | } |
369 | 272 | ||
370 | /* | 273 | /* |
@@ -392,6 +295,12 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
392 | unsigned long flags; | 295 | unsigned long flags; |
393 | 296 | ||
394 | spin_lock_irqsave(&dwc->lock, flags); | 297 | spin_lock_irqsave(&dwc->lock, flags); |
298 | /* | ||
299 | * Clear block interrupt flag before scanning so that we don't | ||
300 | * miss any, and read LLP before RAW_XFER to ensure it is | ||
301 | * valid if we decide to scan the list. | ||
302 | */ | ||
303 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
395 | llp = channel_readl(dwc, LLP); | 304 | llp = channel_readl(dwc, LLP); |
396 | status_xfer = dma_readl(dw, RAW.XFER); | 305 | status_xfer = dma_readl(dw, RAW.XFER); |
397 | 306 | ||
@@ -409,8 +318,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
409 | return; | 318 | return; |
410 | } | 319 | } |
411 | 320 | ||
412 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, | 321 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
413 | (unsigned long long)llp); | ||
414 | 322 | ||
415 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 323 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
416 | /* check first descriptors addr */ | 324 | /* check first descriptors addr */ |
@@ -446,7 +354,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
446 | "BUG: All descriptors done, but channel not idle!\n"); | 354 | "BUG: All descriptors done, but channel not idle!\n"); |
447 | 355 | ||
448 | /* Try to continue after resetting the channel... */ | 356 | /* Try to continue after resetting the channel... */ |
449 | dwc_chan_disable(dw, dwc); | 357 | channel_clear_bit(dw, CH_EN, dwc->mask); |
358 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
359 | cpu_relax(); | ||
450 | 360 | ||
451 | if (!list_empty(&dwc->queue)) { | 361 | if (!list_empty(&dwc->queue)) { |
452 | list_move(dwc->queue.next, &dwc->active_list); | 362 | list_move(dwc->queue.next, &dwc->active_list); |
@@ -455,11 +365,12 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
455 | spin_unlock_irqrestore(&dwc->lock, flags); | 365 | spin_unlock_irqrestore(&dwc->lock, flags); |
456 | } | 366 | } |
457 | 367 | ||
458 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 368 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
459 | { | 369 | { |
460 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 370 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
461 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 371 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
462 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); | 372 | lli->sar, lli->dar, lli->llp, |
373 | lli->ctlhi, lli->ctllo); | ||
463 | } | 374 | } |
464 | 375 | ||
465 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | 376 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -525,16 +436,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr); | |||
525 | 436 | ||
526 | /* called with dwc->lock held and all DMAC interrupts disabled */ | 437 | /* called with dwc->lock held and all DMAC interrupts disabled */ |
527 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | 438 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
528 | u32 status_err, u32 status_xfer) | 439 | u32 status_block, u32 status_err, u32 status_xfer) |
529 | { | 440 | { |
530 | unsigned long flags; | 441 | unsigned long flags; |
531 | 442 | ||
532 | if (dwc->mask) { | 443 | if (status_block & dwc->mask) { |
533 | void (*callback)(void *param); | 444 | void (*callback)(void *param); |
534 | void *callback_param; | 445 | void *callback_param; |
535 | 446 | ||
536 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | 447 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", |
537 | channel_readl(dwc, LLP)); | 448 | channel_readl(dwc, LLP)); |
449 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
538 | 450 | ||
539 | callback = dwc->cdesc->period_callback; | 451 | callback = dwc->cdesc->period_callback; |
540 | callback_param = dwc->cdesc->period_callback_param; | 452 | callback_param = dwc->cdesc->period_callback_param; |
@@ -557,15 +469,24 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
557 | 469 | ||
558 | spin_lock_irqsave(&dwc->lock, flags); | 470 | spin_lock_irqsave(&dwc->lock, flags); |
559 | 471 | ||
560 | dwc_dump_chan_regs(dwc); | 472 | dev_err(chan2dev(&dwc->chan), |
473 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
474 | channel_readl(dwc, SAR), | ||
475 | channel_readl(dwc, DAR), | ||
476 | channel_readl(dwc, LLP), | ||
477 | channel_readl(dwc, CTL_HI), | ||
478 | channel_readl(dwc, CTL_LO)); | ||
561 | 479 | ||
562 | dwc_chan_disable(dw, dwc); | 480 | channel_clear_bit(dw, CH_EN, dwc->mask); |
481 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
482 | cpu_relax(); | ||
563 | 483 | ||
564 | /* make sure DMA does not restart by loading a new list */ | 484 | /* make sure DMA does not restart by loading a new list */ |
565 | channel_writel(dwc, LLP, 0); | 485 | channel_writel(dwc, LLP, 0); |
566 | channel_writel(dwc, CTL_LO, 0); | 486 | channel_writel(dwc, CTL_LO, 0); |
567 | channel_writel(dwc, CTL_HI, 0); | 487 | channel_writel(dwc, CTL_HI, 0); |
568 | 488 | ||
489 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
569 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 490 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
570 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 491 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
571 | 492 | ||
@@ -582,57 +503,36 @@ static void dw_dma_tasklet(unsigned long data) | |||
582 | { | 503 | { |
583 | struct dw_dma *dw = (struct dw_dma *)data; | 504 | struct dw_dma *dw = (struct dw_dma *)data; |
584 | struct dw_dma_chan *dwc; | 505 | struct dw_dma_chan *dwc; |
506 | u32 status_block; | ||
585 | u32 status_xfer; | 507 | u32 status_xfer; |
586 | u32 status_err; | 508 | u32 status_err; |
587 | int i; | 509 | int i; |
588 | 510 | ||
511 | status_block = dma_readl(dw, RAW.BLOCK); | ||
589 | status_xfer = dma_readl(dw, RAW.XFER); | 512 | status_xfer = dma_readl(dw, RAW.XFER); |
590 | status_err = dma_readl(dw, RAW.ERROR); | 513 | status_err = dma_readl(dw, RAW.ERROR); |
591 | 514 | ||
592 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); | 515 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", |
516 | status_block, status_err); | ||
593 | 517 | ||
594 | for (i = 0; i < dw->dma.chancnt; i++) { | 518 | for (i = 0; i < dw->dma.chancnt; i++) { |
595 | dwc = &dw->chan[i]; | 519 | dwc = &dw->chan[i]; |
596 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) | 520 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
597 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); | 521 | dwc_handle_cyclic(dw, dwc, status_block, status_err, |
522 | status_xfer); | ||
598 | else if (status_err & (1 << i)) | 523 | else if (status_err & (1 << i)) |
599 | dwc_handle_error(dw, dwc); | 524 | dwc_handle_error(dw, dwc); |
600 | else if (status_xfer & (1 << i)) { | 525 | else if ((status_block | status_xfer) & (1 << i)) |
601 | unsigned long flags; | ||
602 | |||
603 | spin_lock_irqsave(&dwc->lock, flags); | ||
604 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | ||
605 | if (dwc->tx_node_active != dwc->tx_list) { | ||
606 | struct dw_desc *desc = | ||
607 | list_entry(dwc->tx_node_active, | ||
608 | struct dw_desc, | ||
609 | desc_node); | ||
610 | |||
611 | dma_writel(dw, CLEAR.XFER, dwc->mask); | ||
612 | |||
613 | /* move pointer to next descriptor */ | ||
614 | dwc->tx_node_active = | ||
615 | dwc->tx_node_active->next; | ||
616 | |||
617 | dwc_do_single_block(dwc, desc); | ||
618 | |||
619 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
620 | continue; | ||
621 | } else { | ||
622 | /* we are done here */ | ||
623 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
624 | } | ||
625 | } | ||
626 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
627 | |||
628 | dwc_scan_descriptors(dw, dwc); | 526 | dwc_scan_descriptors(dw, dwc); |
629 | } | ||
630 | } | 527 | } |
631 | 528 | ||
632 | /* | 529 | /* |
633 | * Re-enable interrupts. | 530 | * Re-enable interrupts. Block Complete interrupts are only |
531 | * enabled if the INT_EN bit in the descriptor is set. This | ||
532 | * will trigger a scan before the whole list is done. | ||
634 | */ | 533 | */ |
635 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | 534 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); |
535 | channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
636 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); | 536 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
637 | } | 537 | } |
638 | 538 | ||
@@ -641,7 +541,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
641 | struct dw_dma *dw = dev_id; | 541 | struct dw_dma *dw = dev_id; |
642 | u32 status; | 542 | u32 status; |
643 | 543 | ||
644 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, | 544 | dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", |
645 | dma_readl(dw, STATUS_INT)); | 545 | dma_readl(dw, STATUS_INT)); |
646 | 546 | ||
647 | /* | 547 | /* |
@@ -649,6 +549,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
649 | * softirq handler. | 549 | * softirq handler. |
650 | */ | 550 | */ |
651 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 551 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
552 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
652 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 553 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
653 | 554 | ||
654 | status = dma_readl(dw, STATUS_INT); | 555 | status = dma_readl(dw, STATUS_INT); |
@@ -659,6 +560,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
659 | 560 | ||
660 | /* Try to recover */ | 561 | /* Try to recover */ |
661 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | 562 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); |
563 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); | ||
662 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); | 564 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
663 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | 565 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); |
664 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | 566 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); |
@@ -679,7 +581,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
679 | unsigned long flags; | 581 | unsigned long flags; |
680 | 582 | ||
681 | spin_lock_irqsave(&dwc->lock, flags); | 583 | spin_lock_irqsave(&dwc->lock, flags); |
682 | cookie = dma_cookie_assign(tx); | 584 | cookie = dwc_assign_cookie(dwc, desc); |
683 | 585 | ||
684 | /* | 586 | /* |
685 | * REVISIT: We should attempt to chain as many descriptors as | 587 | * REVISIT: We should attempt to chain as many descriptors as |
@@ -687,12 +589,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
687 | * for DMA. But this is hard to do in a race-free manner. | 589 | * for DMA. But this is hard to do in a race-free manner. |
688 | */ | 590 | */ |
689 | if (list_empty(&dwc->active_list)) { | 591 | if (list_empty(&dwc->active_list)) { |
690 | dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, | 592 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
691 | desc->txd.cookie); | 593 | desc->txd.cookie); |
692 | list_add_tail(&desc->desc_node, &dwc->active_list); | 594 | list_add_tail(&desc->desc_node, &dwc->active_list); |
693 | dwc_dostart(dwc, dwc_first_active(dwc)); | 595 | dwc_dostart(dwc, dwc_first_active(dwc)); |
694 | } else { | 596 | } else { |
695 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, | 597 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", |
696 | desc->txd.cookie); | 598 | desc->txd.cookie); |
697 | 599 | ||
698 | list_add_tail(&desc->desc_node, &dwc->queue); | 600 | list_add_tail(&desc->desc_node, &dwc->queue); |
@@ -708,7 +610,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
708 | size_t len, unsigned long flags) | 610 | size_t len, unsigned long flags) |
709 | { | 611 | { |
710 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 612 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
711 | struct dw_dma_slave *dws = chan->private; | ||
712 | struct dw_desc *desc; | 613 | struct dw_desc *desc; |
713 | struct dw_desc *first; | 614 | struct dw_desc *first; |
714 | struct dw_desc *prev; | 615 | struct dw_desc *prev; |
@@ -716,26 +617,30 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
716 | size_t offset; | 617 | size_t offset; |
717 | unsigned int src_width; | 618 | unsigned int src_width; |
718 | unsigned int dst_width; | 619 | unsigned int dst_width; |
719 | unsigned int data_width; | ||
720 | u32 ctllo; | 620 | u32 ctllo; |
721 | 621 | ||
722 | dev_vdbg(chan2dev(chan), | 622 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", |
723 | "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, | 623 | dest, src, len, flags); |
724 | (unsigned long long)dest, (unsigned long long)src, | ||
725 | len, flags); | ||
726 | 624 | ||
727 | if (unlikely(!len)) { | 625 | if (unlikely(!len)) { |
728 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); | 626 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
729 | return NULL; | 627 | return NULL; |
730 | } | 628 | } |
731 | 629 | ||
732 | data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)], | 630 | /* |
733 | dwc->dw->data_width[dwc_get_dms(dws)]); | 631 | * We can be a lot more clever here, but this should take care |
734 | 632 | * of the most common optimization. | |
735 | src_width = dst_width = min_t(unsigned int, data_width, | 633 | */ |
736 | dwc_fast_fls(src | dest | len)); | 634 | if (!((src | dest | len) & 7)) |
635 | src_width = dst_width = 3; | ||
636 | else if (!((src | dest | len) & 3)) | ||
637 | src_width = dst_width = 2; | ||
638 | else if (!((src | dest | len) & 1)) | ||
639 | src_width = dst_width = 1; | ||
640 | else | ||
641 | src_width = dst_width = 0; | ||
737 | 642 | ||
738 | ctllo = DWC_DEFAULT_CTLLO(chan) | 643 | ctllo = DWC_DEFAULT_CTLLO(chan->private) |
739 | | DWC_CTLL_DST_WIDTH(dst_width) | 644 | | DWC_CTLL_DST_WIDTH(dst_width) |
740 | | DWC_CTLL_SRC_WIDTH(src_width) | 645 | | DWC_CTLL_SRC_WIDTH(src_width) |
741 | | DWC_CTLL_DST_INC | 646 | | DWC_CTLL_DST_INC |
@@ -745,7 +650,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
745 | 650 | ||
746 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | 651 | for (offset = 0; offset < len; offset += xfer_count << src_width) { |
747 | xfer_count = min_t(size_t, (len - offset) >> src_width, | 652 | xfer_count = min_t(size_t, (len - offset) >> src_width, |
748 | dwc->block_size); | 653 | DWC_MAX_COUNT); |
749 | 654 | ||
750 | desc = dwc_desc_get(dwc); | 655 | desc = dwc_desc_get(dwc); |
751 | if (!desc) | 656 | if (!desc) |
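In dwc_prep_dma_memcpy() the right-hand column picks the transfer width from the common alignment of source address, destination address and length, while the left-hand column derives the same limit from the masters' data_width and dwc_fast_fls(); either way, the chosen width then scales how many elements fit in one descriptor against the controller's block limit. A standalone sketch of that alignment check (function and variable names are illustrative, and the block limit value is only an assumed stand-in for DWC_MAX_COUNT):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns the width encoding used by DWC_CTLL_SRC/DST_WIDTH():
     * log2 of the element size, so 0 = byte, 1 = halfword, 2 = word, 3 = dword. */
    static unsigned int pick_width(uint32_t src, uint32_t dest, size_t len)
    {
        if (!((src | dest | len) & 7))
            return 3;    /* all 8-byte aligned */
        if (!((src | dest | len) & 3))
            return 2;    /* all 4-byte aligned */
        if (!((src | dest | len) & 1))
            return 1;    /* all 2-byte aligned */
        return 0;        /* fall back to byte accesses */
    }

    int main(void)
    {
        size_t len = 4096;
        unsigned int width = pick_width(0x1000, 0x2000, len);

        /* Assumed stand-in for the DWC_MAX_COUNT block limit. */
        size_t max_elems = 2048;
        size_t elems = len >> width;
        size_t xfer_count = elems < max_elems ? elems : max_elems;

        printf("width=%u, first descriptor carries %zu elements\n",
               width, xfer_count);
        return 0;
    }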
@@ -791,53 +696,46 @@ err_desc_get: | |||
791 | 696 | ||
792 | static struct dma_async_tx_descriptor * | 697 | static struct dma_async_tx_descriptor * |
793 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 698 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
794 | unsigned int sg_len, enum dma_transfer_direction direction, | 699 | unsigned int sg_len, enum dma_data_direction direction, |
795 | unsigned long flags, void *context) | 700 | unsigned long flags) |
796 | { | 701 | { |
797 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 702 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
798 | struct dw_dma_slave *dws = chan->private; | 703 | struct dw_dma_slave *dws = chan->private; |
799 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
800 | struct dw_desc *prev; | 704 | struct dw_desc *prev; |
801 | struct dw_desc *first; | 705 | struct dw_desc *first; |
802 | u32 ctllo; | 706 | u32 ctllo; |
803 | dma_addr_t reg; | 707 | dma_addr_t reg; |
804 | unsigned int reg_width; | 708 | unsigned int reg_width; |
805 | unsigned int mem_width; | 709 | unsigned int mem_width; |
806 | unsigned int data_width; | ||
807 | unsigned int i; | 710 | unsigned int i; |
808 | struct scatterlist *sg; | 711 | struct scatterlist *sg; |
809 | size_t total_len = 0; | 712 | size_t total_len = 0; |
810 | 713 | ||
811 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 714 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); |
812 | 715 | ||
813 | if (unlikely(!dws || !sg_len)) | 716 | if (unlikely(!dws || !sg_len)) |
814 | return NULL; | 717 | return NULL; |
815 | 718 | ||
719 | reg_width = dws->reg_width; | ||
816 | prev = first = NULL; | 720 | prev = first = NULL; |
817 | 721 | ||
818 | switch (direction) { | 722 | switch (direction) { |
819 | case DMA_MEM_TO_DEV: | 723 | case DMA_TO_DEVICE: |
820 | reg_width = __fls(sconfig->dst_addr_width); | 724 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
821 | reg = sconfig->dst_addr; | ||
822 | ctllo = (DWC_DEFAULT_CTLLO(chan) | ||
823 | | DWC_CTLL_DST_WIDTH(reg_width) | 725 | | DWC_CTLL_DST_WIDTH(reg_width) |
824 | | DWC_CTLL_DST_FIX | 726 | | DWC_CTLL_DST_FIX |
825 | | DWC_CTLL_SRC_INC); | 727 | | DWC_CTLL_SRC_INC |
826 | 728 | | DWC_CTLL_FC(dws->fc)); | |
827 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 729 | reg = dws->tx_reg; |
828 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | ||
829 | |||
830 | data_width = dwc->dw->data_width[dwc_get_sms(dws)]; | ||
831 | |||
832 | for_each_sg(sgl, sg, sg_len, i) { | 730 | for_each_sg(sgl, sg, sg_len, i) { |
833 | struct dw_desc *desc; | 731 | struct dw_desc *desc; |
834 | u32 len, dlen, mem; | 732 | u32 len, dlen, mem; |
835 | 733 | ||
836 | mem = sg_dma_address(sg); | 734 | mem = sg_phys(sg); |
837 | len = sg_dma_len(sg); | 735 | len = sg_dma_len(sg); |
838 | 736 | mem_width = 2; | |
839 | mem_width = min_t(unsigned int, | 737 | if (unlikely(mem & 3 || len & 3)) |
840 | data_width, dwc_fast_fls(mem | len)); | 738 | mem_width = 0; |
841 | 739 | ||
842 | slave_sg_todev_fill_desc: | 740 | slave_sg_todev_fill_desc: |
843 | desc = dwc_desc_get(dwc); | 741 | desc = dwc_desc_get(dwc); |
@@ -850,8 +748,8 @@ slave_sg_todev_fill_desc: | |||
850 | desc->lli.sar = mem; | 748 | desc->lli.sar = mem; |
851 | desc->lli.dar = reg; | 749 | desc->lli.dar = reg; |
852 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 750 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); |
853 | if ((len >> mem_width) > dwc->block_size) { | 751 | if ((len >> mem_width) > DWC_MAX_COUNT) { |
854 | dlen = dwc->block_size << mem_width; | 752 | dlen = DWC_MAX_COUNT << mem_width; |
855 | mem += dlen; | 753 | mem += dlen; |
856 | len -= dlen; | 754 | len -= dlen; |
857 | } else { | 755 | } else { |
@@ -879,28 +777,23 @@ slave_sg_todev_fill_desc: | |||
879 | goto slave_sg_todev_fill_desc; | 777 | goto slave_sg_todev_fill_desc; |
880 | } | 778 | } |
881 | break; | 779 | break; |
882 | case DMA_DEV_TO_MEM: | 780 | case DMA_FROM_DEVICE: |
883 | reg_width = __fls(sconfig->src_addr_width); | 781 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
884 | reg = sconfig->src_addr; | ||
885 | ctllo = (DWC_DEFAULT_CTLLO(chan) | ||
886 | | DWC_CTLL_SRC_WIDTH(reg_width) | 782 | | DWC_CTLL_SRC_WIDTH(reg_width) |
887 | | DWC_CTLL_DST_INC | 783 | | DWC_CTLL_DST_INC |
888 | | DWC_CTLL_SRC_FIX); | 784 | | DWC_CTLL_SRC_FIX |
889 | 785 | | DWC_CTLL_FC(dws->fc)); | |
890 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | ||
891 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | ||
892 | |||
893 | data_width = dwc->dw->data_width[dwc_get_dms(dws)]; | ||
894 | 786 | ||
787 | reg = dws->rx_reg; | ||
895 | for_each_sg(sgl, sg, sg_len, i) { | 788 | for_each_sg(sgl, sg, sg_len, i) { |
896 | struct dw_desc *desc; | 789 | struct dw_desc *desc; |
897 | u32 len, dlen, mem; | 790 | u32 len, dlen, mem; |
898 | 791 | ||
899 | mem = sg_dma_address(sg); | 792 | mem = sg_phys(sg); |
900 | len = sg_dma_len(sg); | 793 | len = sg_dma_len(sg); |
901 | 794 | mem_width = 2; | |
902 | mem_width = min_t(unsigned int, | 795 | if (unlikely(mem & 3 || len & 3)) |
903 | data_width, dwc_fast_fls(mem | len)); | 796 | mem_width = 0; |
904 | 797 | ||
905 | slave_sg_fromdev_fill_desc: | 798 | slave_sg_fromdev_fill_desc: |
906 | desc = dwc_desc_get(dwc); | 799 | desc = dwc_desc_get(dwc); |
@@ -913,8 +806,8 @@ slave_sg_fromdev_fill_desc: | |||
913 | desc->lli.sar = reg; | 806 | desc->lli.sar = reg; |
914 | desc->lli.dar = mem; | 807 | desc->lli.dar = mem; |
915 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 808 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); |
916 | if ((len >> reg_width) > dwc->block_size) { | 809 | if ((len >> reg_width) > DWC_MAX_COUNT) { |
917 | dlen = dwc->block_size << reg_width; | 810 | dlen = DWC_MAX_COUNT << reg_width; |
918 | mem += dlen; | 811 | mem += dlen; |
919 | len -= dlen; | 812 | len -= dlen; |
920 | } else { | 813 | } else { |
@@ -963,39 +856,6 @@ err_desc_get: | |||
963 | return NULL; | 856 | return NULL; |
964 | } | 857 | } |
965 | 858 | ||
966 | /* | ||
967 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: | ||
968 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
969 | * | ||
970 | * NOTE: burst size 2 is not supported by controller. | ||
971 | * | ||
972 | * This can be done by finding least significant bit set: n & (n - 1) | ||
973 | */ | ||
974 | static inline void convert_burst(u32 *maxburst) | ||
975 | { | ||
976 | if (*maxburst > 1) | ||
977 | *maxburst = fls(*maxburst) - 2; | ||
978 | else | ||
979 | *maxburst = 0; | ||
980 | } | ||
981 | |||
982 | static int | ||
983 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | ||
984 | { | ||
985 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
986 | |||
987 | /* Check if chan is configured for slave transfers */ | ||
988 | if (!chan->private) | ||
989 | return -EINVAL; | ||
990 | |||
991 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
992 | |||
993 | convert_burst(&dwc->dma_sconfig.src_maxburst); | ||
994 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 859 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1000 | unsigned long arg) | 860 | unsigned long arg) |
1001 | { | 861 | { |
@@ -1030,9 +890,9 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1030 | } else if (cmd == DMA_TERMINATE_ALL) { | 890 | } else if (cmd == DMA_TERMINATE_ALL) { |
1031 | spin_lock_irqsave(&dwc->lock, flags); | 891 | spin_lock_irqsave(&dwc->lock, flags); |
1032 | 892 | ||
1033 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | 893 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1034 | 894 | while (dma_readl(dw, CH_EN) & dwc->mask) | |
1035 | dwc_chan_disable(dw, dwc); | 895 | cpu_relax(); |
1036 | 896 | ||
1037 | dwc->paused = false; | 897 | dwc->paused = false; |
1038 | 898 | ||
@@ -1045,11 +905,8 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1045 | /* Flush all pending and queued descriptors */ | 905 | /* Flush all pending and queued descriptors */ |
1046 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 906 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
1047 | dwc_descriptor_complete(dwc, desc, false); | 907 | dwc_descriptor_complete(dwc, desc, false); |
1048 | } else if (cmd == DMA_SLAVE_CONFIG) { | 908 | } else |
1049 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | ||
1050 | } else { | ||
1051 | return -ENXIO; | 909 | return -ENXIO; |
1052 | } | ||
1053 | 910 | ||
1054 | return 0; | 911 | return 0; |
1055 | } | 912 | } |
@@ -1060,17 +917,28 @@ dwc_tx_status(struct dma_chan *chan, | |||
1060 | struct dma_tx_state *txstate) | 917 | struct dma_tx_state *txstate) |
1061 | { | 918 | { |
1062 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 919 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1063 | enum dma_status ret; | 920 | dma_cookie_t last_used; |
921 | dma_cookie_t last_complete; | ||
922 | int ret; | ||
923 | |||
924 | last_complete = dwc->completed; | ||
925 | last_used = chan->cookie; | ||
1064 | 926 | ||
1065 | ret = dma_cookie_status(chan, cookie, txstate); | 927 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
1066 | if (ret != DMA_SUCCESS) { | 928 | if (ret != DMA_SUCCESS) { |
1067 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 929 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
1068 | 930 | ||
1069 | ret = dma_cookie_status(chan, cookie, txstate); | 931 | last_complete = dwc->completed; |
932 | last_used = chan->cookie; | ||
933 | |||
934 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1070 | } | 935 | } |
1071 | 936 | ||
1072 | if (ret != DMA_SUCCESS) | 937 | if (ret != DMA_SUCCESS) |
1073 | dma_set_residue(txstate, dwc_first_active(dwc)->len); | 938 | dma_set_tx_state(txstate, last_complete, last_used, |
939 | dwc_first_active(dwc)->len); | ||
940 | else | ||
941 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
1074 | 942 | ||
1075 | if (dwc->paused) | 943 | if (dwc->paused) |
1076 | return DMA_PAUSED; | 944 | return DMA_PAUSED; |
@@ -1091,10 +959,13 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1091 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 959 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1092 | struct dw_dma *dw = to_dw_dma(chan->device); | 960 | struct dw_dma *dw = to_dw_dma(chan->device); |
1093 | struct dw_desc *desc; | 961 | struct dw_desc *desc; |
962 | struct dw_dma_slave *dws; | ||
1094 | int i; | 963 | int i; |
964 | u32 cfghi; | ||
965 | u32 cfglo; | ||
1095 | unsigned long flags; | 966 | unsigned long flags; |
1096 | 967 | ||
1097 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 968 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
1098 | 969 | ||
1099 | /* ASSERT: channel is idle */ | 970 | /* ASSERT: channel is idle */ |
1100 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 971 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
@@ -1102,7 +973,27 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1102 | return -EIO; | 973 | return -EIO; |
1103 | } | 974 | } |
1104 | 975 | ||
1105 | dma_cookie_init(chan); | 976 | dwc->completed = chan->cookie = 1; |
977 | |||
978 | cfghi = DWC_CFGH_FIFO_MODE; | ||
979 | cfglo = 0; | ||
980 | |||
981 | dws = chan->private; | ||
982 | if (dws) { | ||
983 | /* | ||
984 | * We need controller-specific data to set up slave | ||
985 | * transfers. | ||
986 | */ | ||
987 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | ||
988 | |||
989 | cfghi = dws->cfg_hi; | ||
990 | cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | ||
991 | } | ||
992 | |||
993 | cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); | ||
994 | |||
995 | channel_writel(dwc, CFG_LO, cfglo); | ||
996 | channel_writel(dwc, CFG_HI, cfghi); | ||
1106 | 997 | ||
1107 | /* | 998 | /* |
1108 | * NOTE: some controllers may have additional features that we | 999 | * NOTE: some controllers may have additional features that we |
@@ -1135,9 +1026,15 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1135 | i = ++dwc->descs_allocated; | 1026 | i = ++dwc->descs_allocated; |
1136 | } | 1027 | } |
1137 | 1028 | ||
1029 | /* Enable interrupts */ | ||
1030 | channel_set_bit(dw, MASK.XFER, dwc->mask); | ||
1031 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | ||
1032 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | ||
1033 | |||
1138 | spin_unlock_irqrestore(&dwc->lock, flags); | 1034 | spin_unlock_irqrestore(&dwc->lock, flags); |
1139 | 1035 | ||
1140 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); | 1036 | dev_dbg(chan2dev(chan), |
1037 | "alloc_chan_resources allocated %d descriptors\n", i); | ||
1141 | 1038 | ||
1142 | return i; | 1039 | return i; |
1143 | } | 1040 | } |
@@ -1150,7 +1047,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1150 | unsigned long flags; | 1047 | unsigned long flags; |
1151 | LIST_HEAD(list); | 1048 | LIST_HEAD(list); |
1152 | 1049 | ||
1153 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, | 1050 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
1154 | dwc->descs_allocated); | 1051 | dwc->descs_allocated); |
1155 | 1052 | ||
1156 | /* ASSERT: channel is idle */ | 1053 | /* ASSERT: channel is idle */ |
@@ -1161,10 +1058,10 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1161 | spin_lock_irqsave(&dwc->lock, flags); | 1058 | spin_lock_irqsave(&dwc->lock, flags); |
1162 | list_splice_init(&dwc->free_list, &list); | 1059 | list_splice_init(&dwc->free_list, &list); |
1163 | dwc->descs_allocated = 0; | 1060 | dwc->descs_allocated = 0; |
1164 | dwc->initialized = false; | ||
1165 | 1061 | ||
1166 | /* Disable interrupts */ | 1062 | /* Disable interrupts */ |
1167 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 1063 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
1064 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); | ||
1168 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); | 1065 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1169 | 1066 | ||
1170 | spin_unlock_irqrestore(&dwc->lock, flags); | 1067 | spin_unlock_irqrestore(&dwc->lock, flags); |
@@ -1176,7 +1073,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1176 | kfree(desc); | 1073 | kfree(desc); |
1177 | } | 1074 | } |
1178 | 1075 | ||
1179 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1076 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); |
1180 | } | 1077 | } |
1181 | 1078 | ||
1182 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 1079 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
@@ -1205,11 +1102,18 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1205 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1102 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
1206 | dev_err(chan2dev(&dwc->chan), | 1103 | dev_err(chan2dev(&dwc->chan), |
1207 | "BUG: Attempted to start non-idle channel\n"); | 1104 | "BUG: Attempted to start non-idle channel\n"); |
1208 | dwc_dump_chan_regs(dwc); | 1105 | dev_err(chan2dev(&dwc->chan), |
1106 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
1107 | channel_readl(dwc, SAR), | ||
1108 | channel_readl(dwc, DAR), | ||
1109 | channel_readl(dwc, LLP), | ||
1110 | channel_readl(dwc, CTL_HI), | ||
1111 | channel_readl(dwc, CTL_LO)); | ||
1209 | spin_unlock_irqrestore(&dwc->lock, flags); | 1112 | spin_unlock_irqrestore(&dwc->lock, flags); |
1210 | return -EBUSY; | 1113 | return -EBUSY; |
1211 | } | 1114 | } |
1212 | 1115 | ||
1116 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
1213 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1117 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1214 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1118 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1215 | 1119 | ||
@@ -1240,7 +1144,9 @@ void dw_dma_cyclic_stop(struct dma_chan *chan) | |||
1240 | 1144 | ||
1241 | spin_lock_irqsave(&dwc->lock, flags); | 1145 | spin_lock_irqsave(&dwc->lock, flags); |
1242 | 1146 | ||
1243 | dwc_chan_disable(dw, dwc); | 1147 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1148 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
1149 | cpu_relax(); | ||
1244 | 1150 | ||
1245 | spin_unlock_irqrestore(&dwc->lock, flags); | 1151 | spin_unlock_irqrestore(&dwc->lock, flags); |
1246 | } | 1152 | } |
@@ -1259,14 +1165,14 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop); | |||
1259 | */ | 1165 | */ |
1260 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | 1166 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, |
1261 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | 1167 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, |
1262 | enum dma_transfer_direction direction) | 1168 | enum dma_data_direction direction) |
1263 | { | 1169 | { |
1264 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1170 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1265 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
1266 | struct dw_cyclic_desc *cdesc; | 1171 | struct dw_cyclic_desc *cdesc; |
1267 | struct dw_cyclic_desc *retval = NULL; | 1172 | struct dw_cyclic_desc *retval = NULL; |
1268 | struct dw_desc *desc; | 1173 | struct dw_desc *desc; |
1269 | struct dw_desc *last = NULL; | 1174 | struct dw_desc *last = NULL; |
1175 | struct dw_dma_slave *dws = chan->private; | ||
1270 | unsigned long was_cyclic; | 1176 | unsigned long was_cyclic; |
1271 | unsigned int reg_width; | 1177 | unsigned int reg_width; |
1272 | unsigned int periods; | 1178 | unsigned int periods; |
@@ -1274,13 +1180,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1274 | unsigned long flags; | 1180 | unsigned long flags; |
1275 | 1181 | ||
1276 | spin_lock_irqsave(&dwc->lock, flags); | 1182 | spin_lock_irqsave(&dwc->lock, flags); |
1277 | if (dwc->nollp) { | ||
1278 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1279 | dev_dbg(chan2dev(&dwc->chan), | ||
1280 | "channel doesn't support LLP transfers\n"); | ||
1281 | return ERR_PTR(-EINVAL); | ||
1282 | } | ||
1283 | |||
1284 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 1183 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
1285 | spin_unlock_irqrestore(&dwc->lock, flags); | 1184 | spin_unlock_irqrestore(&dwc->lock, flags); |
1286 | dev_dbg(chan2dev(&dwc->chan), | 1185 | dev_dbg(chan2dev(&dwc->chan), |
@@ -1297,22 +1196,17 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1297 | } | 1196 | } |
1298 | 1197 | ||
1299 | retval = ERR_PTR(-EINVAL); | 1198 | retval = ERR_PTR(-EINVAL); |
1300 | 1199 | reg_width = dws->reg_width; | |
1301 | if (direction == DMA_MEM_TO_DEV) | ||
1302 | reg_width = __ffs(sconfig->dst_addr_width); | ||
1303 | else | ||
1304 | reg_width = __ffs(sconfig->src_addr_width); | ||
1305 | |||
1306 | periods = buf_len / period_len; | 1200 | periods = buf_len / period_len; |
1307 | 1201 | ||
1308 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | 1202 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ |
1309 | if (period_len > (dwc->block_size << reg_width)) | 1203 | if (period_len > (DWC_MAX_COUNT << reg_width)) |
1310 | goto out_err; | 1204 | goto out_err; |
1311 | if (unlikely(period_len & ((1 << reg_width) - 1))) | 1205 | if (unlikely(period_len & ((1 << reg_width) - 1))) |
1312 | goto out_err; | 1206 | goto out_err; |
1313 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 1207 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
1314 | goto out_err; | 1208 | goto out_err; |
1315 | if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) | 1209 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) |
1316 | goto out_err; | 1210 | goto out_err; |
1317 | 1211 | ||
1318 | retval = ERR_PTR(-ENOMEM); | 1212 | retval = ERR_PTR(-ENOMEM); |
@@ -1334,35 +1228,27 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1334 | goto out_err_desc_get; | 1228 | goto out_err_desc_get; |
1335 | 1229 | ||
1336 | switch (direction) { | 1230 | switch (direction) { |
1337 | case DMA_MEM_TO_DEV: | 1231 | case DMA_TO_DEVICE: |
1338 | desc->lli.dar = sconfig->dst_addr; | 1232 | desc->lli.dar = dws->tx_reg; |
1339 | desc->lli.sar = buf_addr + (period_len * i); | 1233 | desc->lli.sar = buf_addr + (period_len * i); |
1340 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | 1234 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
1341 | | DWC_CTLL_DST_WIDTH(reg_width) | 1235 | | DWC_CTLL_DST_WIDTH(reg_width) |
1342 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1236 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1343 | | DWC_CTLL_DST_FIX | 1237 | | DWC_CTLL_DST_FIX |
1344 | | DWC_CTLL_SRC_INC | 1238 | | DWC_CTLL_SRC_INC |
1239 | | DWC_CTLL_FC(dws->fc) | ||
1345 | | DWC_CTLL_INT_EN); | 1240 | | DWC_CTLL_INT_EN); |
1346 | |||
1347 | desc->lli.ctllo |= sconfig->device_fc ? | ||
1348 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | ||
1349 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | ||
1350 | |||
1351 | break; | 1241 | break; |
1352 | case DMA_DEV_TO_MEM: | 1242 | case DMA_FROM_DEVICE: |
1353 | desc->lli.dar = buf_addr + (period_len * i); | 1243 | desc->lli.dar = buf_addr + (period_len * i); |
1354 | desc->lli.sar = sconfig->src_addr; | 1244 | desc->lli.sar = dws->rx_reg; |
1355 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | 1245 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
1356 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1246 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1357 | | DWC_CTLL_DST_WIDTH(reg_width) | 1247 | | DWC_CTLL_DST_WIDTH(reg_width) |
1358 | | DWC_CTLL_DST_INC | 1248 | | DWC_CTLL_DST_INC |
1359 | | DWC_CTLL_SRC_FIX | 1249 | | DWC_CTLL_SRC_FIX |
1250 | | DWC_CTLL_FC(dws->fc) | ||
1360 | | DWC_CTLL_INT_EN); | 1251 | | DWC_CTLL_INT_EN); |
1361 | |||
1362 | desc->lli.ctllo |= sconfig->device_fc ? | ||
1363 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | ||
1364 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | ||
1365 | |||
1366 | break; | 1252 | break; |
1367 | default: | 1253 | default: |
1368 | break; | 1254 | break; |
@@ -1386,9 +1272,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1386 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | 1272 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, |
1387 | sizeof(last->lli), DMA_TO_DEVICE); | 1273 | sizeof(last->lli), DMA_TO_DEVICE); |
1388 | 1274 | ||
1389 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " | 1275 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " |
1390 | "period %zu periods %d\n", (unsigned long long)buf_addr, | 1276 | "period %zu periods %d\n", buf_addr, buf_len, |
1391 | buf_len, period_len, periods); | 1277 | period_len, periods); |
1392 | 1278 | ||
1393 | cdesc->periods = periods; | 1279 | cdesc->periods = periods; |
1394 | dwc->cdesc = cdesc; | 1280 | dwc->cdesc = cdesc; |
@@ -1418,15 +1304,18 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1418 | int i; | 1304 | int i; |
1419 | unsigned long flags; | 1305 | unsigned long flags; |
1420 | 1306 | ||
1421 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); | 1307 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); |
1422 | 1308 | ||
1423 | if (!cdesc) | 1309 | if (!cdesc) |
1424 | return; | 1310 | return; |
1425 | 1311 | ||
1426 | spin_lock_irqsave(&dwc->lock, flags); | 1312 | spin_lock_irqsave(&dwc->lock, flags); |
1427 | 1313 | ||
1428 | dwc_chan_disable(dw, dwc); | 1314 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1315 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
1316 | cpu_relax(); | ||
1429 | 1317 | ||
1318 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
1430 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1319 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1431 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1320 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1432 | 1321 | ||
@@ -1446,38 +1335,29 @@ EXPORT_SYMBOL(dw_dma_cyclic_free); | |||
1446 | 1335 | ||
1447 | static void dw_dma_off(struct dw_dma *dw) | 1336 | static void dw_dma_off(struct dw_dma *dw) |
1448 | { | 1337 | { |
1449 | int i; | ||
1450 | |||
1451 | dma_writel(dw, CFG, 0); | 1338 | dma_writel(dw, CFG, 0); |
1452 | 1339 | ||
1453 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 1340 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
1341 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
1454 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 1342 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1455 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 1343 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); |
1456 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 1344 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
1457 | 1345 | ||
1458 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | 1346 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) |
1459 | cpu_relax(); | 1347 | cpu_relax(); |
1460 | |||
1461 | for (i = 0; i < dw->dma.chancnt; i++) | ||
1462 | dw->chan[i].initialized = false; | ||
1463 | } | 1348 | } |
1464 | 1349 | ||
1465 | static int dw_probe(struct platform_device *pdev) | 1350 | static int __init dw_probe(struct platform_device *pdev) |
1466 | { | 1351 | { |
1467 | struct dw_dma_platform_data *pdata; | 1352 | struct dw_dma_platform_data *pdata; |
1468 | struct resource *io; | 1353 | struct resource *io; |
1469 | struct dw_dma *dw; | 1354 | struct dw_dma *dw; |
1470 | size_t size; | 1355 | size_t size; |
1471 | void __iomem *regs; | ||
1472 | bool autocfg; | ||
1473 | unsigned int dw_params; | ||
1474 | unsigned int nr_channels; | ||
1475 | unsigned int max_blk_size = 0; | ||
1476 | int irq; | 1356 | int irq; |
1477 | int err; | 1357 | int err; |
1478 | int i; | 1358 | int i; |
1479 | 1359 | ||
1480 | pdata = dev_get_platdata(&pdev->dev); | 1360 | pdata = pdev->dev.platform_data; |
1481 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | 1361 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1482 | return -EINVAL; | 1362 | return -EINVAL; |
1483 | 1363 | ||
@@ -1489,69 +1369,50 @@ static int dw_probe(struct platform_device *pdev) | |||
1489 | if (irq < 0) | 1369 | if (irq < 0) |
1490 | return irq; | 1370 | return irq; |
1491 | 1371 | ||
1492 | regs = devm_request_and_ioremap(&pdev->dev, io); | 1372 | size = sizeof(struct dw_dma); |
1493 | if (!regs) | 1373 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); |
1494 | return -EBUSY; | 1374 | dw = kzalloc(size, GFP_KERNEL); |
1495 | |||
1496 | dw_params = dma_read_byaddr(regs, DW_PARAMS); | ||
1497 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | ||
1498 | |||
1499 | if (autocfg) | ||
1500 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; | ||
1501 | else | ||
1502 | nr_channels = pdata->nr_channels; | ||
1503 | |||
1504 | size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); | ||
1505 | dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | ||
1506 | if (!dw) | 1375 | if (!dw) |
1507 | return -ENOMEM; | 1376 | return -ENOMEM; |
1508 | 1377 | ||
1509 | dw->clk = devm_clk_get(&pdev->dev, "hclk"); | 1378 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { |
1510 | if (IS_ERR(dw->clk)) | 1379 | err = -EBUSY; |
1511 | return PTR_ERR(dw->clk); | 1380 | goto err_kfree; |
1512 | clk_prepare_enable(dw->clk); | 1381 | } |
1513 | |||
1514 | dw->regs = regs; | ||
1515 | |||
1516 | /* get hardware configuration parameters */ | ||
1517 | if (autocfg) { | ||
1518 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | ||
1519 | 1382 | ||
1520 | dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | 1383 | dw->regs = ioremap(io->start, DW_REGLEN); |
1521 | for (i = 0; i < dw->nr_masters; i++) { | 1384 | if (!dw->regs) { |
1522 | dw->data_width[i] = | 1385 | err = -ENOMEM; |
1523 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | 1386 | goto err_release_r; |
1524 | } | ||
1525 | } else { | ||
1526 | dw->nr_masters = pdata->nr_masters; | ||
1527 | memcpy(dw->data_width, pdata->data_width, 4); | ||
1528 | } | 1387 | } |
1529 | 1388 | ||
1530 | /* Calculate all channel mask before DMA setup */ | 1389 | dw->clk = clk_get(&pdev->dev, "hclk"); |
1531 | dw->all_chan_mask = (1 << nr_channels) - 1; | 1390 | if (IS_ERR(dw->clk)) { |
1391 | err = PTR_ERR(dw->clk); | ||
1392 | goto err_clk; | ||
1393 | } | ||
1394 | clk_enable(dw->clk); | ||
1532 | 1395 | ||
1533 | /* force dma off, just in case */ | 1396 | /* force dma off, just in case */ |
1534 | dw_dma_off(dw); | 1397 | dw_dma_off(dw); |
1535 | 1398 | ||
1536 | /* disable BLOCK interrupts as well */ | 1399 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); |
1537 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
1538 | |||
1539 | err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, | ||
1540 | "dw_dmac", dw); | ||
1541 | if (err) | 1400 | if (err) |
1542 | return err; | 1401 | goto err_irq; |
1543 | 1402 | ||
1544 | platform_set_drvdata(pdev, dw); | 1403 | platform_set_drvdata(pdev, dw); |
1545 | 1404 | ||
1546 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1405 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1547 | 1406 | ||
1407 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | ||
1408 | |||
1548 | INIT_LIST_HEAD(&dw->dma.channels); | 1409 | INIT_LIST_HEAD(&dw->dma.channels); |
1549 | for (i = 0; i < nr_channels; i++) { | 1410 | for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { |
1550 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1411 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1551 | int r = nr_channels - i - 1; | ||
1552 | 1412 | ||
1553 | dwc->chan.device = &dw->dma; | 1413 | dwc->chan.device = &dw->dma; |
1554 | dma_cookie_init(&dwc->chan); | 1414 | dwc->chan.cookie = dwc->completed = 1; |
1415 | dwc->chan.chan_id = i; | ||
1555 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) | 1416 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1556 | list_add_tail(&dwc->chan.device_node, | 1417 | list_add_tail(&dwc->chan.device_node, |
1557 | &dw->dma.channels); | 1418 | &dw->dma.channels); |
@@ -1560,7 +1421,7 @@ static int dw_probe(struct platform_device *pdev) | |||
1560 | 1421 | ||
1561 | /* 7 is highest priority & 0 is lowest. */ | 1422 | /* 7 is highest priority & 0 is lowest. */ |
1562 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1423 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1563 | dwc->priority = r; | 1424 | dwc->priority = 7 - i; |
1564 | else | 1425 | else |
1565 | dwc->priority = i; | 1426 | dwc->priority = i; |
1566 | 1427 | ||
@@ -1573,41 +1434,21 @@ static int dw_probe(struct platform_device *pdev) | |||
1573 | INIT_LIST_HEAD(&dwc->free_list); | 1434 | INIT_LIST_HEAD(&dwc->free_list); |
1574 | 1435 | ||
1575 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1436 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1576 | |||
1577 | dwc->dw = dw; | ||
1578 | |||
1579 | /* hardware configuration */ | ||
1580 | if (autocfg) { | ||
1581 | unsigned int dwc_params; | ||
1582 | |||
1583 | dwc_params = dma_read_byaddr(regs + r * sizeof(u32), | ||
1584 | DWC_PARAMS); | ||
1585 | |||
1586 | /* Decode maximum block size for given channel. The | ||
1587 | * stored 4 bit value represents blocks from 0x00 for 3 | ||
1588 | * up to 0x0a for 4095. */ | ||
1589 | dwc->block_size = | ||
1590 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | ||
1591 | dwc->nollp = | ||
1592 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | ||
1593 | } else { | ||
1594 | dwc->block_size = pdata->block_size; | ||
1595 | |||
1596 | /* Check if channel supports multi block transfer */ | ||
1597 | channel_writel(dwc, LLP, 0xfffffffc); | ||
1598 | dwc->nollp = | ||
1599 | (channel_readl(dwc, LLP) & 0xfffffffc) == 0; | ||
1600 | channel_writel(dwc, LLP, 0); | ||
1601 | } | ||
1602 | } | 1437 | } |
1603 | 1438 | ||
1604 | /* Clear all interrupts on all channels. */ | 1439 | /* Clear/disable all interrupts on all channels. */ |
1605 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | 1440 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
1606 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); | 1441 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
1607 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); | 1442 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1608 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | 1443 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); |
1609 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | 1444 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); |
1610 | 1445 | ||
1446 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
1447 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
1448 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | ||
1449 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | ||
1450 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
1451 | |||
1611 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 1452 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1612 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1453 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1613 | if (pdata->is_private) | 1454 | if (pdata->is_private) |
@@ -1627,21 +1468,35 @@ static int dw_probe(struct platform_device *pdev) | |||
1627 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1468 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1628 | 1469 | ||
1629 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | 1470 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", |
1630 | dev_name(&pdev->dev), nr_channels); | 1471 | dev_name(&pdev->dev), dw->dma.chancnt); |
1631 | 1472 | ||
1632 | dma_async_device_register(&dw->dma); | 1473 | dma_async_device_register(&dw->dma); |
1633 | 1474 | ||
1634 | return 0; | 1475 | return 0; |
1476 | |||
1477 | err_irq: | ||
1478 | clk_disable(dw->clk); | ||
1479 | clk_put(dw->clk); | ||
1480 | err_clk: | ||
1481 | iounmap(dw->regs); | ||
1482 | dw->regs = NULL; | ||
1483 | err_release_r: | ||
1484 | release_resource(io); | ||
1485 | err_kfree: | ||
1486 | kfree(dw); | ||
1487 | return err; | ||
1635 | } | 1488 | } |
1636 | 1489 | ||
1637 | static int dw_remove(struct platform_device *pdev) | 1490 | static int __exit dw_remove(struct platform_device *pdev) |
1638 | { | 1491 | { |
1639 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1492 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1640 | struct dw_dma_chan *dwc, *_dwc; | 1493 | struct dw_dma_chan *dwc, *_dwc; |
1494 | struct resource *io; | ||
1641 | 1495 | ||
1642 | dw_dma_off(dw); | 1496 | dw_dma_off(dw); |
1643 | dma_async_device_unregister(&dw->dma); | 1497 | dma_async_device_unregister(&dw->dma); |
1644 | 1498 | ||
1499 | free_irq(platform_get_irq(pdev, 0), dw); | ||
1645 | tasklet_kill(&dw->tasklet); | 1500 | tasklet_kill(&dw->tasklet); |
1646 | 1501 | ||
1647 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | 1502 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, |
@@ -1650,6 +1505,17 @@ static int dw_remove(struct platform_device *pdev) | |||
1650 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1505 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1651 | } | 1506 | } |
1652 | 1507 | ||
1508 | clk_disable(dw->clk); | ||
1509 | clk_put(dw->clk); | ||
1510 | |||
1511 | iounmap(dw->regs); | ||
1512 | dw->regs = NULL; | ||
1513 | |||
1514 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1515 | release_mem_region(io->start, DW_REGLEN); | ||
1516 | |||
1517 | kfree(dw); | ||
1518 | |||
1653 | return 0; | 1519 | return 0; |
1654 | } | 1520 | } |
1655 | 1521 | ||
@@ -1658,7 +1524,7 @@ static void dw_shutdown(struct platform_device *pdev) | |||
1658 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1524 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1659 | 1525 | ||
1660 | dw_dma_off(platform_get_drvdata(pdev)); | 1526 | dw_dma_off(platform_get_drvdata(pdev)); |
1661 | clk_disable_unprepare(dw->clk); | 1527 | clk_disable(dw->clk); |
1662 | } | 1528 | } |
1663 | 1529 | ||
1664 | static int dw_suspend_noirq(struct device *dev) | 1530 | static int dw_suspend_noirq(struct device *dev) |
@@ -1667,8 +1533,7 @@ static int dw_suspend_noirq(struct device *dev) | |||
1667 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1533 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1668 | 1534 | ||
1669 | dw_dma_off(platform_get_drvdata(pdev)); | 1535 | dw_dma_off(platform_get_drvdata(pdev)); |
1670 | clk_disable_unprepare(dw->clk); | 1536 | clk_disable(dw->clk); |
1671 | |||
1672 | return 0; | 1537 | return 0; |
1673 | } | 1538 | } |
1674 | 1539 | ||
@@ -1677,7 +1542,7 @@ static int dw_resume_noirq(struct device *dev) | |||
1677 | struct platform_device *pdev = to_platform_device(dev); | 1542 | struct platform_device *pdev = to_platform_device(dev); |
1678 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1543 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1679 | 1544 | ||
1680 | clk_prepare_enable(dw->clk); | 1545 | clk_enable(dw->clk); |
1681 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1546 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1682 | return 0; | 1547 | return 0; |
1683 | } | 1548 | } |
@@ -1685,27 +1550,14 @@ static int dw_resume_noirq(struct device *dev) | |||
1685 | static const struct dev_pm_ops dw_dev_pm_ops = { | 1550 | static const struct dev_pm_ops dw_dev_pm_ops = { |
1686 | .suspend_noirq = dw_suspend_noirq, | 1551 | .suspend_noirq = dw_suspend_noirq, |
1687 | .resume_noirq = dw_resume_noirq, | 1552 | .resume_noirq = dw_resume_noirq, |
1688 | .freeze_noirq = dw_suspend_noirq, | ||
1689 | .thaw_noirq = dw_resume_noirq, | ||
1690 | .restore_noirq = dw_resume_noirq, | ||
1691 | .poweroff_noirq = dw_suspend_noirq, | ||
1692 | }; | ||
1693 | |||
1694 | #ifdef CONFIG_OF | ||
1695 | static const struct of_device_id dw_dma_id_table[] = { | ||
1696 | { .compatible = "snps,dma-spear1340" }, | ||
1697 | {} | ||
1698 | }; | 1553 | }; |
1699 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); | ||
1700 | #endif | ||
1701 | 1554 | ||
1702 | static struct platform_driver dw_driver = { | 1555 | static struct platform_driver dw_driver = { |
1703 | .remove = dw_remove, | 1556 | .remove = __exit_p(dw_remove), |
1704 | .shutdown = dw_shutdown, | 1557 | .shutdown = dw_shutdown, |
1705 | .driver = { | 1558 | .driver = { |
1706 | .name = "dw_dmac", | 1559 | .name = "dw_dmac", |
1707 | .pm = &dw_dev_pm_ops, | 1560 | .pm = &dw_dev_pm_ops, |
1708 | .of_match_table = of_match_ptr(dw_dma_id_table), | ||
1709 | }, | 1561 | }, |
1710 | }; | 1562 | }; |
1711 | 1563 | ||
@@ -1724,4 +1576,4 @@ module_exit(dw_exit); | |||
1724 | MODULE_LICENSE("GPL v2"); | 1576 | MODULE_LICENSE("GPL v2"); |
1725 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1577 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1726 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 1578 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1727 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); | 1579 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); |
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 88965597b7d..c3419518d70 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -13,18 +13,6 @@ | |||
13 | 13 | ||
14 | #define DW_DMA_MAX_NR_CHANNELS 8 | 14 | #define DW_DMA_MAX_NR_CHANNELS 8 |
15 | 15 | ||
16 | /* flow controller */ | ||
17 | enum dw_dma_fc { | ||
18 | DW_DMA_FC_D_M2M, | ||
19 | DW_DMA_FC_D_M2P, | ||
20 | DW_DMA_FC_D_P2M, | ||
21 | DW_DMA_FC_D_P2P, | ||
22 | DW_DMA_FC_P_P2M, | ||
23 | DW_DMA_FC_SP_P2P, | ||
24 | DW_DMA_FC_P_M2P, | ||
25 | DW_DMA_FC_DP_P2P, | ||
26 | }; | ||
27 | |||
28 | /* | 16 | /* |
29 | * Redefine this macro to handle differences between 32- and 64-bit | 17 | * Redefine this macro to handle differences between 32- and 64-bit |
30 | * addressing, big vs. little endian, etc. | 18 | * addressing, big vs. little endian, etc. |
@@ -82,47 +70,9 @@ struct dw_dma_regs { | |||
82 | DW_REG(ID); | 70 | DW_REG(ID); |
83 | DW_REG(TEST); | 71 | DW_REG(TEST); |
84 | 72 | ||
85 | /* reserved */ | 73 | /* optional encoded params, 0x3c8..0x3 */ |
86 | DW_REG(__reserved0); | ||
87 | DW_REG(__reserved1); | ||
88 | |||
89 | /* optional encoded params, 0x3c8..0x3f7 */ | ||
90 | u32 __reserved; | ||
91 | |||
92 | /* per-channel configuration registers */ | ||
93 | u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; | ||
94 | u32 MULTI_BLK_TYPE; | ||
95 | u32 MAX_BLK_SIZE; | ||
96 | |||
97 | /* top-level parameters */ | ||
98 | u32 DW_PARAMS; | ||
99 | }; | 74 | }; |
100 | 75 | ||
101 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO | ||
102 | #define dma_readl_native ioread32be | ||
103 | #define dma_writel_native iowrite32be | ||
104 | #else | ||
105 | #define dma_readl_native readl | ||
106 | #define dma_writel_native writel | ||
107 | #endif | ||
108 | |||
109 | /* To access the registers in early stage of probe */ | ||
110 | #define dma_read_byaddr(addr, name) \ | ||
111 | dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) | ||
112 | |||
113 | /* Bitfields in DW_PARAMS */ | ||
114 | #define DW_PARAMS_NR_CHAN 8 /* number of channels */ | ||
115 | #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ | ||
116 | #define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) | ||
117 | #define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ | ||
118 | #define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ | ||
119 | #define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ | ||
120 | #define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ | ||
121 | #define DW_PARAMS_EN 28 /* encoded parameters */ | ||
122 | |||
123 | /* Bitfields in DWC_PARAMS */ | ||
124 | #define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ | ||
125 | |||
126 | /* Bitfields in CTL_LO */ | 76 | /* Bitfields in CTL_LO */ |
127 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ | 77 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ |
128 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ | 78 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ |
@@ -178,9 +128,10 @@ struct dw_dma_regs { | |||
178 | /* Bitfields in CFG */ | 128 | /* Bitfields in CFG */ |
179 | #define DW_CFG_DMA_EN (1 << 0) | 129 | #define DW_CFG_DMA_EN (1 << 0) |
180 | 130 | ||
131 | #define DW_REGLEN 0x400 | ||
132 | |||
181 | enum dw_dmac_flags { | 133 | enum dw_dmac_flags { |
182 | DW_DMA_IS_CYCLIC = 0, | 134 | DW_DMA_IS_CYCLIC = 0, |
183 | DW_DMA_IS_SOFT_LLP = 1, | ||
184 | }; | 135 | }; |
185 | 136 | ||
186 | struct dw_dma_chan { | 137 | struct dw_dma_chan { |
@@ -189,32 +140,18 @@ struct dw_dma_chan { | |||
189 | u8 mask; | 140 | u8 mask; |
190 | u8 priority; | 141 | u8 priority; |
191 | bool paused; | 142 | bool paused; |
192 | bool initialized; | ||
193 | |||
194 | /* software emulation of the LLP transfers */ | ||
195 | struct list_head *tx_list; | ||
196 | struct list_head *tx_node_active; | ||
197 | 143 | ||
198 | spinlock_t lock; | 144 | spinlock_t lock; |
199 | 145 | ||
200 | /* these other elements are all protected by lock */ | 146 | /* these other elements are all protected by lock */ |
201 | unsigned long flags; | 147 | unsigned long flags; |
148 | dma_cookie_t completed; | ||
202 | struct list_head active_list; | 149 | struct list_head active_list; |
203 | struct list_head queue; | 150 | struct list_head queue; |
204 | struct list_head free_list; | 151 | struct list_head free_list; |
205 | struct dw_cyclic_desc *cdesc; | 152 | struct dw_cyclic_desc *cdesc; |
206 | 153 | ||
207 | unsigned int descs_allocated; | 154 | unsigned int descs_allocated; |
208 | |||
209 | /* hardware configuration */ | ||
210 | unsigned int block_size; | ||
211 | bool nollp; | ||
212 | |||
213 | /* configuration passed via DMA_SLAVE_CONFIG */ | ||
214 | struct dma_slave_config dma_sconfig; | ||
215 | |||
216 | /* backlink to dw_dma */ | ||
217 | struct dw_dma *dw; | ||
218 | }; | 155 | }; |
219 | 156 | ||
220 | static inline struct dw_dma_chan_regs __iomem * | 157 | static inline struct dw_dma_chan_regs __iomem * |
@@ -224,9 +161,9 @@ __dwc_regs(struct dw_dma_chan *dwc) | |||
224 | } | 161 | } |
225 | 162 | ||
226 | #define channel_readl(dwc, name) \ | 163 | #define channel_readl(dwc, name) \ |
227 | dma_readl_native(&(__dwc_regs(dwc)->name)) | 164 | readl(&(__dwc_regs(dwc)->name)) |
228 | #define channel_writel(dwc, name, val) \ | 165 | #define channel_writel(dwc, name, val) \ |
229 | dma_writel_native((val), &(__dwc_regs(dwc)->name)) | 166 | writel((val), &(__dwc_regs(dwc)->name)) |
230 | 167 | ||
231 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) | 168 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) |
232 | { | 169 | { |
@@ -241,10 +178,6 @@ struct dw_dma { | |||
241 | 178 | ||
242 | u8 all_chan_mask; | 179 | u8 all_chan_mask; |
243 | 180 | ||
244 | /* hardware configuration */ | ||
245 | unsigned char nr_masters; | ||
246 | unsigned char data_width[4]; | ||
247 | |||
248 | struct dw_dma_chan chan[0]; | 181 | struct dw_dma_chan chan[0]; |
249 | }; | 182 | }; |
250 | 183 | ||
@@ -254,9 +187,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) | |||
254 | } | 187 | } |
255 | 188 | ||
256 | #define dma_readl(dw, name) \ | 189 | #define dma_readl(dw, name) \ |
257 | dma_readl_native(&(__dw_regs(dw)->name)) | 190 | readl(&(__dw_regs(dw)->name)) |
258 | #define dma_writel(dw, name, val) \ | 191 | #define dma_writel(dw, name, val) \ |
259 | dma_writel_native((val), &(__dw_regs(dw)->name)) | 192 | writel((val), &(__dw_regs(dw)->name)) |
260 | 193 | ||
261 | #define channel_set_bit(dw, reg, mask) \ | 194 | #define channel_set_bit(dw, reg, mask) \ |
262 | dma_writel(dw, reg, ((mask) << 8) | (mask)) | 195 | dma_writel(dw, reg, ((mask) << 8) | (mask)) |
@@ -271,9 +204,9 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) | |||
271 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ | 204 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ |
272 | struct dw_lli { | 205 | struct dw_lli { |
273 | /* values that are not changed by hardware */ | 206 | /* values that are not changed by hardware */ |
274 | u32 sar; | 207 | dma_addr_t sar; |
275 | u32 dar; | 208 | dma_addr_t dar; |
276 | u32 llp; /* chain to next lli */ | 209 | dma_addr_t llp; /* chain to next lli */ |
277 | u32 ctllo; | 210 | u32 ctllo; |
278 | /* values that may get written back: */ | 211 | /* values that may get written back: */ |
279 | u32 ctlhi; | 212 | u32 ctlhi; |
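The dw_lli structure at the end of this header is the controller's block descriptor: LLP chains to the next descriptor so the hardware can walk a whole list on its own, and the control/status words after the fixed fields may be written back by the controller. A toy, host-only illustration of building such a chain (plain pointers stand in for the DMA bus addresses and coherent memory a real driver would have to use):

    #include <stdint.h>
    #include <stdio.h>

    /* Shape loosely modelled on struct dw_lli; not the driver's layout. */
    struct lli {
        uint32_t sar;       /* source address */
        uint32_t dar;       /* destination address */
        struct lli *llp;    /* next descriptor, NULL terminates the chain */
        uint32_t ctllo;
        uint32_t ctlhi;
    };

    int main(void)
    {
        struct lli blocks[3];

        for (int i = 0; i < 3; i++) {
            blocks[i].sar   = 0x10000000u + (uint32_t)i * 0x1000;
            blocks[i].dar   = 0x20000000u + (uint32_t)i * 0x1000;
            blocks[i].ctllo = 0;
            blocks[i].ctlhi = 0x1000 >> 2;  /* element count at 32-bit width */
            blocks[i].llp   = (i < 2) ? &blocks[i + 1] : NULL;
        }

        /* Walk the list the way the controller would follow LLP. */
        for (struct lli *p = blocks; p; p = p->llp)
            printf("block: sar=0x%08x dar=0x%08x\n",
                   (unsigned int)p->sar, (unsigned int)p->dar);
        return 0;
    }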
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c deleted file mode 100644 index f424298f1ac..00000000000 --- a/drivers/dma/edma.c +++ /dev/null | |||
@@ -1,671 +0,0 @@ | |||
1 | /* | ||
2 | * TI EDMA DMA engine driver | ||
3 | * | ||
4 | * Copyright 2012 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | |||
27 | #include <mach/edma.h> | ||
28 | |||
29 | #include "dmaengine.h" | ||
30 | #include "virt-dma.h" | ||
31 | |||
32 | /* | ||
33 | * This will go away when the private EDMA API is folded | ||
34 | * into this driver and the platform device(s) are | ||
35 | * instantiated in the arch code. We can only get away | ||
36 | * with this simplification because DA8XX may not be built | ||
37 | * in the same kernel image with other DaVinci parts. This | ||
38 | * avoids having to sprinkle dmaengine driver platform devices | ||
39 | * and data throughout all the existing board files. | ||
40 | */ | ||
41 | #ifdef CONFIG_ARCH_DAVINCI_DA8XX | ||
42 | #define EDMA_CTLRS 2 | ||
43 | #define EDMA_CHANS 32 | ||
44 | #else | ||
45 | #define EDMA_CTLRS 1 | ||
46 | #define EDMA_CHANS 64 | ||
47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | ||
48 | |||
49 | /* Max of 16 segments per channel to conserve PaRAM slots */ | ||
50 | #define MAX_NR_SG 16 | ||
51 | #define EDMA_MAX_SLOTS MAX_NR_SG | ||
52 | #define EDMA_DESCRIPTORS 16 | ||
53 | |||
54 | struct edma_desc { | ||
55 | struct virt_dma_desc vdesc; | ||
56 | struct list_head node; | ||
57 | int absync; | ||
58 | int pset_nr; | ||
59 | struct edmacc_param pset[0]; | ||
60 | }; | ||
61 | |||
62 | struct edma_cc; | ||
63 | |||
64 | struct edma_chan { | ||
65 | struct virt_dma_chan vchan; | ||
66 | struct list_head node; | ||
67 | struct edma_desc *edesc; | ||
68 | struct edma_cc *ecc; | ||
69 | int ch_num; | ||
70 | bool alloced; | ||
71 | int slot[EDMA_MAX_SLOTS]; | ||
72 | dma_addr_t addr; | ||
73 | int addr_width; | ||
74 | int maxburst; | ||
75 | }; | ||
76 | |||
77 | struct edma_cc { | ||
78 | int ctlr; | ||
79 | struct dma_device dma_slave; | ||
80 | struct edma_chan slave_chans[EDMA_CHANS]; | ||
81 | int num_slave_chans; | ||
82 | int dummy_slot; | ||
83 | }; | ||
84 | |||
85 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) | ||
86 | { | ||
87 | return container_of(d, struct edma_cc, dma_slave); | ||
88 | } | ||
89 | |||
90 | static inline struct edma_chan *to_edma_chan(struct dma_chan *c) | ||
91 | { | ||
92 | return container_of(c, struct edma_chan, vchan.chan); | ||
93 | } | ||
94 | |||
95 | static inline struct edma_desc | ||
96 | *to_edma_desc(struct dma_async_tx_descriptor *tx) | ||
97 | { | ||
98 | return container_of(tx, struct edma_desc, vdesc.tx); | ||
99 | } | ||
100 | |||
101 | static void edma_desc_free(struct virt_dma_desc *vdesc) | ||
102 | { | ||
103 | kfree(container_of(vdesc, struct edma_desc, vdesc)); | ||
104 | } | ||
105 | |||
106 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ | ||
107 | static void edma_execute(struct edma_chan *echan) | ||
108 | { | ||
109 | struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan); | ||
110 | struct edma_desc *edesc; | ||
111 | int i; | ||
112 | |||
113 | if (!vdesc) { | ||
114 | echan->edesc = NULL; | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | list_del(&vdesc->node); | ||
119 | |||
120 | echan->edesc = edesc = to_edma_desc(&vdesc->tx); | ||
121 | |||
122 | /* Write descriptor PaRAM set(s) */ | ||
123 | for (i = 0; i < edesc->pset_nr; i++) { | ||
124 | edma_write_slot(echan->slot[i], &edesc->pset[i]); | ||
125 | dev_dbg(echan->vchan.chan.device->dev, | ||
126 | "\n pset[%d]:\n" | ||
127 | " chnum\t%d\n" | ||
128 | " slot\t%d\n" | ||
129 | " opt\t%08x\n" | ||
130 | " src\t%08x\n" | ||
131 | " dst\t%08x\n" | ||
132 | " abcnt\t%08x\n" | ||
133 | " ccnt\t%08x\n" | ||
134 | " bidx\t%08x\n" | ||
135 | " cidx\t%08x\n" | ||
136 | " lkrld\t%08x\n", | ||
137 | i, echan->ch_num, echan->slot[i], | ||
138 | edesc->pset[i].opt, | ||
139 | edesc->pset[i].src, | ||
140 | edesc->pset[i].dst, | ||
141 | edesc->pset[i].a_b_cnt, | ||
142 | edesc->pset[i].ccnt, | ||
143 | edesc->pset[i].src_dst_bidx, | ||
144 | edesc->pset[i].src_dst_cidx, | ||
145 | edesc->pset[i].link_bcntrld); | ||
146 | /* Link to the previous slot if not the last set */ | ||
147 | if (i != (edesc->pset_nr - 1)) | ||
148 | edma_link(echan->slot[i], echan->slot[i+1]); | ||
149 | /* Final pset links to the dummy pset */ | ||
150 | else | ||
151 | edma_link(echan->slot[i], echan->ecc->dummy_slot); | ||
152 | } | ||
153 | |||
154 | edma_start(echan->ch_num); | ||
155 | } | ||
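A quick sketch of the PaRAM chain that edma_execute() above builds for a three-set descriptor (illustrative only; the actual slot numbers are whatever edma_alloc_slot() returned):

	/* slot[0] --link--> slot[1] --link--> slot[2] --link--> ecc->dummy_slot */

Linking the final set to the dummy slot gives the channel somewhere safe to land after the last transfer instead of reloading a stale parameter set.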
156 | |||
157 | static int edma_terminate_all(struct edma_chan *echan) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | LIST_HEAD(head); | ||
161 | |||
162 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
163 | |||
164 | /* | ||
165 | * Stop DMA activity: we assume the callback will not be called | ||
166 | * after edma_dma() returns (even if it does, it will see | ||
167 | * echan->edesc is NULL and exit.) | ||
168 | */ | ||
169 | if (echan->edesc) { | ||
170 | echan->edesc = NULL; | ||
171 | edma_stop(echan->ch_num); | ||
172 | } | ||
173 | |||
174 | vchan_get_all_descriptors(&echan->vchan, &head); | ||
175 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
176 | vchan_dma_desc_free_list(&echan->vchan, &head); | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | |||
182 | static int edma_slave_config(struct edma_chan *echan, | ||
183 | struct dma_slave_config *config) | ||
184 | { | ||
185 | if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
186 | (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
187 | return -EINVAL; | ||
188 | |||
189 | if (config->direction == DMA_MEM_TO_DEV) { | ||
190 | if (config->dst_addr) | ||
191 | echan->addr = config->dst_addr; | ||
192 | if (config->dst_addr_width) | ||
193 | echan->addr_width = config->dst_addr_width; | ||
194 | if (config->dst_maxburst) | ||
195 | echan->maxburst = config->dst_maxburst; | ||
196 | } else if (config->direction == DMA_DEV_TO_MEM) { | ||
197 | if (config->src_addr) | ||
198 | echan->addr = config->src_addr; | ||
199 | if (config->src_addr_width) | ||
200 | echan->addr_width = config->src_addr_width; | ||
201 | if (config->src_maxburst) | ||
202 | echan->maxburst = config->src_maxburst; | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
209 | unsigned long arg) | ||
210 | { | ||
211 | int ret = 0; | ||
212 | struct dma_slave_config *config; | ||
213 | struct edma_chan *echan = to_edma_chan(chan); | ||
214 | |||
215 | switch (cmd) { | ||
216 | case DMA_TERMINATE_ALL: | ||
217 | edma_terminate_all(echan); | ||
218 | break; | ||
219 | case DMA_SLAVE_CONFIG: | ||
220 | config = (struct dma_slave_config *)arg; | ||
221 | ret = edma_slave_config(echan, config); | ||
222 | break; | ||
223 | default: | ||
224 | ret = -ENOSYS; | ||
225 | } | ||
226 | |||
227 | return ret; | ||
228 | } | ||
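For context, the two commands handled above are what the generic dmaengine helpers map to. A client-side sketch (the FIFO address and burst size are made-up values, not taken from the patch):

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys_addr,            /* hypothetical peripheral FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 8,
	};

	dmaengine_slave_config(chan, &cfg);   /* routed to the DMA_SLAVE_CONFIG case above */
	/* ... later, to abort outstanding transfers: */
	dmaengine_terminate_all(chan);        /* routed to the DMA_TERMINATE_ALL case above */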
229 | |||
230 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( | ||
231 | struct dma_chan *chan, struct scatterlist *sgl, | ||
232 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
233 | unsigned long tx_flags, void *context) | ||
234 | { | ||
235 | struct edma_chan *echan = to_edma_chan(chan); | ||
236 | struct device *dev = chan->device->dev; | ||
237 | struct edma_desc *edesc; | ||
238 | struct scatterlist *sg; | ||
239 | int i; | ||
240 | int acnt, bcnt, ccnt, src, dst, cidx; | ||
241 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
242 | |||
243 | if (unlikely(!echan || !sgl || !sg_len)) | ||
244 | return NULL; | ||
245 | |||
246 | if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
247 | dev_err(dev, "Undefined slave buswidth\n"); | ||
248 | return NULL; | ||
249 | } | ||
250 | |||
251 | if (sg_len > MAX_NR_SG) { | ||
252 | dev_err(dev, "Exceeded max SG segments %d > %d\n", | ||
253 | sg_len, MAX_NR_SG); | ||
254 | return NULL; | ||
255 | } | ||
256 | |||
257 | edesc = kzalloc(sizeof(*edesc) + sg_len * | ||
258 | sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
259 | if (!edesc) { | ||
260 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
261 | return NULL; | ||
262 | } | ||
263 | |||
264 | edesc->pset_nr = sg_len; | ||
265 | |||
266 | for_each_sg(sgl, sg, sg_len, i) { | ||
267 | /* Allocate a PaRAM slot, if needed */ | ||
268 | if (echan->slot[i] < 0) { | ||
269 | echan->slot[i] = | ||
270 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | ||
271 | EDMA_SLOT_ANY); | ||
272 | if (echan->slot[i] < 0) { | ||
273 | dev_err(dev, "Failed to allocate slot\n"); | ||
274 | return NULL; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | acnt = echan->addr_width; | ||
279 | |||
280 | /* | ||
281 | * If the maxburst is equal to the fifo width, use | ||
282 | * A-synced transfers. This allows for large contiguous | ||
283 | * buffer transfers using only one PaRAM set. | ||
284 | */ | ||
285 | if (echan->maxburst == 1) { | ||
286 | edesc->absync = false; | ||
287 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | ||
288 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | ||
289 | if (bcnt) | ||
290 | ccnt++; | ||
291 | else | ||
292 | bcnt = SZ_64K - 1; | ||
293 | cidx = acnt; | ||
294 | /* | ||
295 | * If maxburst is greater than the fifo address_width, | ||
296 | * use AB-synced transfers where A count is the fifo | ||
297 | * address_width and B count is the maxburst. In this | ||
298 | * case, we are limited to transfers of C count frames | ||
299 | * of (address_width * maxburst) where C count is limited | ||
300 | * to SZ_64K-1. This places an upper bound on the length | ||
301 | * of an SG segment that can be handled. | ||
302 | */ | ||
303 | } else { | ||
304 | edesc->absync = true; | ||
305 | bcnt = echan->maxburst; | ||
306 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | ||
307 | if (ccnt > (SZ_64K - 1)) { | ||
308 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
309 | return NULL; | ||
310 | } | ||
311 | cidx = acnt * bcnt; | ||
312 | } | ||
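A worked example of the count arithmetic above, with made-up numbers (4-byte bus width, 1 MiB segment):

	/* A-sync branch (maxburst == 1), acnt = 4, sg_dma_len(sg) = 1048576:
	 *   ccnt = 1048576 / 4 / 65535      = 4
	 *   bcnt = 1048576 / 4 - 4 * 65535  = 4      (non-zero, so ccnt becomes 5)
	 *   cidx = acnt                     = 4
	 *
	 * AB-sync branch with maxburst = 8 instead:
	 *   bcnt = 8
	 *   ccnt = 1048576 / (4 * 8)        = 32768  (within the SZ_64K - 1 limit)
	 *   cidx = acnt * bcnt              = 32
	 */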
313 | |||
314 | if (direction == DMA_MEM_TO_DEV) { | ||
315 | src = sg_dma_address(sg); | ||
316 | dst = echan->addr; | ||
317 | src_bidx = acnt; | ||
318 | src_cidx = cidx; | ||
319 | dst_bidx = 0; | ||
320 | dst_cidx = 0; | ||
321 | } else { | ||
322 | src = echan->addr; | ||
323 | dst = sg_dma_address(sg); | ||
324 | src_bidx = 0; | ||
325 | src_cidx = 0; | ||
326 | dst_bidx = acnt; | ||
327 | dst_cidx = cidx; | ||
328 | } | ||
329 | |||
330 | edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
331 | /* Configure A or AB synchronized transfers */ | ||
332 | if (edesc->absync) | ||
333 | edesc->pset[i].opt |= SYNCDIM; | ||
334 | /* If this is the last set, enable completion interrupt flag */ | ||
335 | if (i == sg_len - 1) | ||
336 | edesc->pset[i].opt |= TCINTEN; | ||
337 | |||
338 | edesc->pset[i].src = src; | ||
339 | edesc->pset[i].dst = dst; | ||
340 | |||
341 | edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; | ||
342 | edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; | ||
343 | |||
344 | edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; | ||
345 | edesc->pset[i].ccnt = ccnt; | ||
346 | edesc->pset[i].link_bcntrld = 0xffffffff; | ||
347 | |||
348 | } | ||
349 | |||
350 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | ||
351 | } | ||
352 | |||
353 | static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | ||
354 | { | ||
355 | struct edma_chan *echan = data; | ||
356 | struct device *dev = echan->vchan.chan.device->dev; | ||
357 | struct edma_desc *edesc; | ||
358 | unsigned long flags; | ||
359 | |||
360 | /* Stop the channel */ | ||
361 | edma_stop(echan->ch_num); | ||
362 | |||
363 | switch (ch_status) { | ||
364 | case DMA_COMPLETE: | ||
365 | dev_dbg(dev, "transfer complete on channel %d\n", ch_num); | ||
366 | |||
367 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
368 | |||
369 | edesc = echan->edesc; | ||
370 | if (edesc) { | ||
371 | edma_execute(echan); | ||
372 | vchan_cookie_complete(&edesc->vdesc); | ||
373 | } | ||
374 | |||
375 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
376 | |||
377 | break; | ||
378 | case DMA_CC_ERROR: | ||
379 | dev_dbg(dev, "transfer error on channel %d\n", ch_num); | ||
380 | break; | ||
381 | default: | ||
382 | break; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | /* Alloc channel resources */ | ||
387 | static int edma_alloc_chan_resources(struct dma_chan *chan) | ||
388 | { | ||
389 | struct edma_chan *echan = to_edma_chan(chan); | ||
390 | struct device *dev = chan->device->dev; | ||
391 | int ret; | ||
392 | int a_ch_num; | ||
393 | LIST_HEAD(descs); | ||
394 | |||
395 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | ||
396 | chan, EVENTQ_DEFAULT); | ||
397 | |||
398 | if (a_ch_num < 0) { | ||
399 | ret = -ENODEV; | ||
400 | goto err_no_chan; | ||
401 | } | ||
402 | |||
403 | if (a_ch_num != echan->ch_num) { | ||
404 | dev_err(dev, "failed to allocate requested channel %u:%u\n", | ||
405 | EDMA_CTLR(echan->ch_num), | ||
406 | EDMA_CHAN_SLOT(echan->ch_num)); | ||
407 | ret = -ENODEV; | ||
408 | goto err_wrong_chan; | ||
409 | } | ||
410 | |||
411 | echan->alloced = true; | ||
412 | echan->slot[0] = echan->ch_num; | ||
413 | |||
414 | dev_info(dev, "allocated channel for %u:%u\n", | ||
415 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | ||
416 | |||
417 | return 0; | ||
418 | |||
419 | err_wrong_chan: | ||
420 | edma_free_channel(a_ch_num); | ||
421 | err_no_chan: | ||
422 | return ret; | ||
423 | } | ||
424 | |||
425 | /* Free channel resources */ | ||
426 | static void edma_free_chan_resources(struct dma_chan *chan) | ||
427 | { | ||
428 | struct edma_chan *echan = to_edma_chan(chan); | ||
429 | struct device *dev = chan->device->dev; | ||
430 | int i; | ||
431 | |||
432 | /* Terminate transfers */ | ||
433 | edma_stop(echan->ch_num); | ||
434 | |||
435 | vchan_free_chan_resources(&echan->vchan); | ||
436 | |||
437 | /* Free EDMA PaRAM slots */ | ||
438 | for (i = 1; i < EDMA_MAX_SLOTS; i++) { | ||
439 | if (echan->slot[i] >= 0) { | ||
440 | edma_free_slot(echan->slot[i]); | ||
441 | echan->slot[i] = -1; | ||
442 | } | ||
443 | } | ||
444 | |||
445 | /* Free EDMA channel */ | ||
446 | if (echan->alloced) { | ||
447 | edma_free_channel(echan->ch_num); | ||
448 | echan->alloced = false; | ||
449 | } | ||
450 | |||
451 | dev_info(dev, "freeing channel for %u\n", echan->ch_num); | ||
452 | } | ||
453 | |||
454 | /* Send pending descriptor to hardware */ | ||
455 | static void edma_issue_pending(struct dma_chan *chan) | ||
456 | { | ||
457 | struct edma_chan *echan = to_edma_chan(chan); | ||
458 | unsigned long flags; | ||
459 | |||
460 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
461 | if (vchan_issue_pending(&echan->vchan) && !echan->edesc) | ||
462 | edma_execute(echan); | ||
463 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
464 | } | ||
465 | |||
466 | static size_t edma_desc_size(struct edma_desc *edesc) | ||
467 | { | ||
468 | int i; | ||
469 | size_t size; | ||
470 | |||
471 | if (edesc->absync) | ||
472 | for (size = i = 0; i < edesc->pset_nr; i++) | ||
473 | size += (edesc->pset[i].a_b_cnt & 0xffff) * | ||
474 | (edesc->pset[i].a_b_cnt >> 16) * | ||
475 | edesc->pset[i].ccnt; | ||
476 | else | ||
477 | size = (edesc->pset[0].a_b_cnt & 0xffff) * | ||
478 | (edesc->pset[0].a_b_cnt >> 16) + | ||
479 | (edesc->pset[0].a_b_cnt & 0xffff) * | ||
480 | (SZ_64K - 1) * edesc->pset[0].ccnt; | ||
481 | |||
482 | return size; | ||
483 | } | ||
484 | |||
485 | /* Check request completion status */ | ||
486 | static enum dma_status edma_tx_status(struct dma_chan *chan, | ||
487 | dma_cookie_t cookie, | ||
488 | struct dma_tx_state *txstate) | ||
489 | { | ||
490 | struct edma_chan *echan = to_edma_chan(chan); | ||
491 | struct virt_dma_desc *vdesc; | ||
492 | enum dma_status ret; | ||
493 | unsigned long flags; | ||
494 | |||
495 | ret = dma_cookie_status(chan, cookie, txstate); | ||
496 | if (ret == DMA_SUCCESS || !txstate) | ||
497 | return ret; | ||
498 | |||
499 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
500 | vdesc = vchan_find_desc(&echan->vchan, cookie); | ||
501 | if (vdesc) { | ||
502 | txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); | ||
503 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | ||
504 | struct edma_desc *edesc = echan->edesc; | ||
505 | txstate->residue = edma_desc_size(edesc); | ||
506 | } else { | ||
507 | txstate->residue = 0; | ||
508 | } | ||
509 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
510 | |||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | static void __init edma_chan_init(struct edma_cc *ecc, | ||
515 | struct dma_device *dma, | ||
516 | struct edma_chan *echans) | ||
517 | { | ||
518 | int i, j; | ||
519 | |||
520 | for (i = 0; i < EDMA_CHANS; i++) { | ||
521 | struct edma_chan *echan = &echans[i]; | ||
522 | echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); | ||
523 | echan->ecc = ecc; | ||
524 | echan->vchan.desc_free = edma_desc_free; | ||
525 | |||
526 | vchan_init(&echan->vchan, dma); | ||
527 | |||
528 | INIT_LIST_HEAD(&echan->node); | ||
529 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | ||
530 | echan->slot[j] = -1; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | ||
535 | struct device *dev) | ||
536 | { | ||
537 | dma->device_prep_slave_sg = edma_prep_slave_sg; | ||
538 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
539 | dma->device_free_chan_resources = edma_free_chan_resources; | ||
540 | dma->device_issue_pending = edma_issue_pending; | ||
541 | dma->device_tx_status = edma_tx_status; | ||
542 | dma->device_control = edma_control; | ||
543 | dma->dev = dev; | ||
544 | |||
545 | INIT_LIST_HEAD(&dma->channels); | ||
546 | } | ||
547 | |||
548 | static int edma_probe(struct platform_device *pdev) | ||
549 | { | ||
550 | struct edma_cc *ecc; | ||
551 | int ret; | ||
552 | |||
553 | ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); | ||
554 | if (!ecc) { | ||
555 | dev_err(&pdev->dev, "Can't allocate controller\n"); | ||
556 | return -ENOMEM; | ||
557 | } | ||
558 | |||
559 | ecc->ctlr = pdev->id; | ||
560 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | ||
561 | if (ecc->dummy_slot < 0) { | ||
562 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | ||
563 | return -EIO; | ||
564 | } | ||
565 | |||
566 | dma_cap_zero(ecc->dma_slave.cap_mask); | ||
567 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | ||
568 | |||
569 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | ||
570 | |||
571 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | ||
572 | |||
573 | ret = dma_async_device_register(&ecc->dma_slave); | ||
574 | if (ret) | ||
575 | goto err_reg1; | ||
576 | |||
577 | platform_set_drvdata(pdev, ecc); | ||
578 | |||
579 | dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); | ||
580 | |||
581 | return 0; | ||
582 | |||
583 | err_reg1: | ||
584 | edma_free_slot(ecc->dummy_slot); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | static int edma_remove(struct platform_device *pdev) | ||
589 | { | ||
590 | struct device *dev = &pdev->dev; | ||
591 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
592 | |||
593 | dma_async_device_unregister(&ecc->dma_slave); | ||
594 | edma_free_slot(ecc->dummy_slot); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static struct platform_driver edma_driver = { | ||
600 | .probe = edma_probe, | ||
601 | .remove = edma_remove, | ||
602 | .driver = { | ||
603 | .name = "edma-dma-engine", | ||
604 | .owner = THIS_MODULE, | ||
605 | }, | ||
606 | }; | ||
607 | |||
608 | bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
609 | { | ||
610 | if (chan->device->dev->driver == &edma_driver.driver) { | ||
611 | struct edma_chan *echan = to_edma_chan(chan); | ||
612 | unsigned ch_req = *(unsigned *)param; | ||
613 | return ch_req == echan->ch_num; | ||
614 | } | ||
615 | return false; | ||
616 | } | ||
617 | EXPORT_SYMBOL(edma_filter_fn); | ||
618 | |||
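A typical consumer of the exported filter function looks roughly like this (a sketch; the controller/channel pair is an arbitrary example, not something this patch defines):

	dma_cap_mask_t mask;
	unsigned int ch = EDMA_CTLR_CHAN(0, 12);   /* example: controller 0, channel 12 */
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, edma_filter_fn, &ch);   /* NULL if no channel matches */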
619 | static struct platform_device *pdev0, *pdev1; | ||
620 | |||
621 | static const struct platform_device_info edma_dev_info0 = { | ||
622 | .name = "edma-dma-engine", | ||
623 | .id = 0, | ||
624 | .dma_mask = DMA_BIT_MASK(32), | ||
625 | }; | ||
626 | |||
627 | static const struct platform_device_info edma_dev_info1 = { | ||
628 | .name = "edma-dma-engine", | ||
629 | .id = 1, | ||
630 | .dma_mask = DMA_BIT_MASK(32), | ||
631 | }; | ||
632 | |||
633 | static int edma_init(void) | ||
634 | { | ||
635 | int ret = platform_driver_register(&edma_driver); | ||
636 | |||
637 | if (ret == 0) { | ||
638 | pdev0 = platform_device_register_full(&edma_dev_info0); | ||
639 | if (IS_ERR(pdev0)) { | ||
640 | platform_driver_unregister(&edma_driver); | ||
641 | ret = PTR_ERR(pdev0); | ||
642 | goto out; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | if (EDMA_CTLRS == 2) { | ||
647 | pdev1 = platform_device_register_full(&edma_dev_info1); | ||
648 | if (IS_ERR(pdev1)) { | ||
649 | platform_driver_unregister(&edma_driver); | ||
650 | platform_device_unregister(pdev0); | ||
651 | ret = PTR_ERR(pdev1); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | out: | ||
656 | return ret; | ||
657 | } | ||
658 | subsys_initcall(edma_init); | ||
659 | |||
660 | static void __exit edma_exit(void) | ||
661 | { | ||
662 | platform_device_unregister(pdev0); | ||
663 | if (pdev1) | ||
664 | platform_device_unregister(pdev1); | ||
665 | platform_driver_unregister(&edma_driver); | ||
666 | } | ||
667 | module_exit(edma_exit); | ||
668 | |||
669 | MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); | ||
670 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); | ||
671 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index bcfde400904..5d7a49bd7c2 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -22,13 +22,10 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/module.h> | ||
26 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
27 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
28 | 27 | ||
29 | #include <linux/platform_data/dma-ep93xx.h> | 28 | #include <mach/dma.h> |
30 | |||
31 | #include "dmaengine.h" | ||
32 | 29 | ||
33 | /* M2P registers */ | 30 | /* M2P registers */ |
34 | #define M2P_CONTROL 0x0000 | 31 | #define M2P_CONTROL 0x0000 |
@@ -71,7 +68,6 @@ | |||
71 | #define M2M_CONTROL_TM_SHIFT 13 | 68 | #define M2M_CONTROL_TM_SHIFT 13 |
72 | #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) | 69 | #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) |
73 | #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) | 70 | #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) |
74 | #define M2M_CONTROL_NFBINT BIT(21) | ||
75 | #define M2M_CONTROL_RSS_SHIFT 22 | 71 | #define M2M_CONTROL_RSS_SHIFT 22 |
76 | #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) | 72 | #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) |
77 | #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) | 73 | #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) |
@@ -80,22 +76,7 @@ | |||
80 | #define M2M_CONTROL_PWSC_SHIFT 25 | 76 | #define M2M_CONTROL_PWSC_SHIFT 25 |
81 | 77 | ||
82 | #define M2M_INTERRUPT 0x0004 | 78 | #define M2M_INTERRUPT 0x0004 |
83 | #define M2M_INTERRUPT_MASK 6 | 79 | #define M2M_INTERRUPT_DONEINT BIT(1) |
84 | |||
85 | #define M2M_STATUS 0x000c | ||
86 | #define M2M_STATUS_CTL_SHIFT 1 | ||
87 | #define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT) | ||
88 | #define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT) | ||
89 | #define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT) | ||
90 | #define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT) | ||
91 | #define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT) | ||
92 | #define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT) | ||
93 | #define M2M_STATUS_BUF_SHIFT 4 | ||
94 | #define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT) | ||
95 | #define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT) | ||
96 | #define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT) | ||
97 | #define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT) | ||
98 | #define M2M_STATUS_DONE BIT(6) | ||
99 | 80 | ||
100 | #define M2M_BCR0 0x0010 | 81 | #define M2M_BCR0 0x0010 |
101 | #define M2M_BCR1 0x0014 | 82 | #define M2M_BCR1 0x0014 |
@@ -140,6 +121,7 @@ struct ep93xx_dma_desc { | |||
140 | * @lock: lock protecting the fields following | 121 | * @lock: lock protecting the fields following |
141 | * @flags: flags for the channel | 122 | * @flags: flags for the channel |
142 | * @buffer: which buffer to use next (0/1) | 123 | * @buffer: which buffer to use next (0/1) |
124 | * @last_completed: last completed cookie value | ||
143 | * @active: flattened chain of descriptors currently being processed | 125 | * @active: flattened chain of descriptors currently being processed |
144 | * @queue: pending descriptors which are handled next | 126 | * @queue: pending descriptors which are handled next |
145 | * @free_list: list of free descriptors which can be used | 127 | * @free_list: list of free descriptors which can be used |
@@ -174,6 +156,7 @@ struct ep93xx_dma_chan { | |||
174 | #define EP93XX_DMA_IS_CYCLIC 0 | 156 | #define EP93XX_DMA_IS_CYCLIC 0 |
175 | 157 | ||
176 | int buffer; | 158 | int buffer; |
159 | dma_cookie_t last_completed; | ||
177 | struct list_head active; | 160 | struct list_head active; |
178 | struct list_head queue; | 161 | struct list_head queue; |
179 | struct list_head free_list; | 162 | struct list_head free_list; |
@@ -262,9 +245,6 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, | |||
262 | static struct ep93xx_dma_desc * | 245 | static struct ep93xx_dma_desc * |
263 | ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) | 246 | ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) |
264 | { | 247 | { |
265 | if (list_empty(&edmac->active)) | ||
266 | return NULL; | ||
267 | |||
268 | return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); | 248 | return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); |
269 | } | 249 | } |
270 | 250 | ||
@@ -282,22 +262,16 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) | |||
282 | */ | 262 | */ |
283 | static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) | 263 | static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) |
284 | { | 264 | { |
285 | struct ep93xx_dma_desc *desc; | ||
286 | |||
287 | list_rotate_left(&edmac->active); | 265 | list_rotate_left(&edmac->active); |
288 | 266 | ||
289 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | 267 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) |
290 | return true; | 268 | return true; |
291 | 269 | ||
292 | desc = ep93xx_dma_get_active(edmac); | ||
293 | if (!desc) | ||
294 | return false; | ||
295 | |||
296 | /* | 270 | /* |
297 | * If txd.cookie is set it means that we are back in the first | 271 | * If txd.cookie is set it means that we are back in the first |
298 | * descriptor in the chain and hence done with it. | 272 | * descriptor in the chain and hence done with it. |
299 | */ | 273 | */ |
300 | return !desc->txd.cookie; | 274 | return !ep93xx_dma_get_active(edmac)->txd.cookie; |
301 | } | 275 | } |
302 | 276 | ||
303 | /* | 277 | /* |
@@ -352,16 +326,10 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | |||
352 | 326 | ||
353 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) | 327 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) |
354 | { | 328 | { |
355 | struct ep93xx_dma_desc *desc; | 329 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); |
356 | u32 bus_addr; | 330 | u32 bus_addr; |
357 | 331 | ||
358 | desc = ep93xx_dma_get_active(edmac); | 332 | if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) |
359 | if (!desc) { | ||
360 | dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n"); | ||
361 | return; | ||
362 | } | ||
363 | |||
364 | if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) | ||
365 | bus_addr = desc->src_addr; | 333 | bus_addr = desc->src_addr; |
366 | else | 334 | else |
367 | bus_addr = desc->dst_addr; | 335 | bus_addr = desc->dst_addr; |
@@ -442,6 +410,15 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) | |||
442 | 410 | ||
443 | /* | 411 | /* |
444 | * M2M DMA implementation | 412 | * M2M DMA implementation |
413 | * | ||
414 | * For the M2M transfers we don't use NFB at all. This is because it simply | ||
415 | * doesn't work well with memcpy transfers. When you submit both buffers it is | ||
416 | * extremely unlikely that you get an NFB interrupt, but it instead reports | ||
417 | * DONE interrupt and both buffers are already transferred which means that we | ||
418 | * weren't able to update the next buffer. | ||
419 | * | ||
420 | * So for now we "simulate" NFB by just submitting buffer after buffer | ||
421 | * without double buffering. | ||
445 | */ | 422 | */ |
446 | 423 | ||
447 | static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | 424 | static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) |
@@ -465,7 +442,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | |||
465 | control = (5 << M2M_CONTROL_PWSC_SHIFT); | 442 | control = (5 << M2M_CONTROL_PWSC_SHIFT); |
466 | control |= M2M_CONTROL_NO_HDSK; | 443 | control |= M2M_CONTROL_NO_HDSK; |
467 | 444 | ||
468 | if (data->direction == DMA_MEM_TO_DEV) { | 445 | if (data->direction == DMA_TO_DEVICE) { |
469 | control |= M2M_CONTROL_DAH; | 446 | control |= M2M_CONTROL_DAH; |
470 | control |= M2M_CONTROL_TM_TX; | 447 | control |= M2M_CONTROL_TM_TX; |
471 | control |= M2M_CONTROL_RSS_SSPTX; | 448 | control |= M2M_CONTROL_RSS_SSPTX; |
@@ -481,7 +458,11 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | |||
481 | * This IDE part is totally untested. Values below are taken | 458 | * This IDE part is totally untested. Values below are taken |
482 | * from the EP93xx Users's Guide and might not be correct. | 459 | * from the EP93xx Users's Guide and might not be correct. |
483 | */ | 460 | */ |
484 | if (data->direction == DMA_MEM_TO_DEV) { | 461 | control |= M2M_CONTROL_NO_HDSK; |
462 | control |= M2M_CONTROL_RSS_IDE; | ||
463 | control |= M2M_CONTROL_PW_16; | ||
464 | |||
465 | if (data->direction == DMA_TO_DEVICE) { | ||
485 | /* Worst case from the UG */ | 466 | /* Worst case from the UG */ |
486 | control = (3 << M2M_CONTROL_PWSC_SHIFT); | 467 | control = (3 << M2M_CONTROL_PWSC_SHIFT); |
487 | control |= M2M_CONTROL_DAH; | 468 | control |= M2M_CONTROL_DAH; |
@@ -491,10 +472,6 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | |||
491 | control |= M2M_CONTROL_SAH; | 472 | control |= M2M_CONTROL_SAH; |
492 | control |= M2M_CONTROL_TM_RX; | 473 | control |= M2M_CONTROL_TM_RX; |
493 | } | 474 | } |
494 | |||
495 | control |= M2M_CONTROL_NO_HDSK; | ||
496 | control |= M2M_CONTROL_RSS_IDE; | ||
497 | control |= M2M_CONTROL_PW_16; | ||
498 | break; | 475 | break; |
499 | 476 | ||
500 | default: | 477 | default: |
@@ -513,13 +490,7 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) | |||
513 | 490 | ||
514 | static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) | 491 | static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) |
515 | { | 492 | { |
516 | struct ep93xx_dma_desc *desc; | 493 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); |
517 | |||
518 | desc = ep93xx_dma_get_active(edmac); | ||
519 | if (!desc) { | ||
520 | dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n"); | ||
521 | return; | ||
522 | } | ||
523 | 494 | ||
524 | if (edmac->buffer == 0) { | 495 | if (edmac->buffer == 0) { |
525 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); | 496 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); |
@@ -550,11 +521,6 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) | |||
550 | m2m_fill_desc(edmac); | 521 | m2m_fill_desc(edmac); |
551 | control |= M2M_CONTROL_DONEINT; | 522 | control |= M2M_CONTROL_DONEINT; |
552 | 523 | ||
553 | if (ep93xx_dma_advance_active(edmac)) { | ||
554 | m2m_fill_desc(edmac); | ||
555 | control |= M2M_CONTROL_NFBINT; | ||
556 | } | ||
557 | |||
558 | /* | 524 | /* |
559 | * Now we can finally enable the channel. For M2M channel this must be | 525 | * Now we can finally enable the channel. For M2M channel this must be |
560 | * done _after_ the BCRx registers are programmed. | 526 | * done _after_ the BCRx registers are programmed. |
@@ -572,89 +538,32 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) | |||
572 | } | 538 | } |
573 | } | 539 | } |
574 | 540 | ||
575 | /* | ||
576 | * According to EP93xx User's Guide, we should receive DONE interrupt when all | ||
577 | * M2M DMA controller transactions complete normally. This is not always the | ||
578 | * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel | ||
579 | * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel | ||
580 | * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation). | ||
581 | * In effect, disabling the channel when only DONE bit is set could stop | ||
582 | * currently running DMA transfer. To avoid this, we use Buffer FSM and | ||
583 | * Control FSM to check current state of DMA channel. | ||
584 | */ | ||
585 | static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) | 541 | static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) |
586 | { | 542 | { |
587 | u32 status = readl(edmac->regs + M2M_STATUS); | ||
588 | u32 ctl_fsm = status & M2M_STATUS_CTL_MASK; | ||
589 | u32 buf_fsm = status & M2M_STATUS_BUF_MASK; | ||
590 | bool done = status & M2M_STATUS_DONE; | ||
591 | bool last_done; | ||
592 | u32 control; | 543 | u32 control; |
593 | struct ep93xx_dma_desc *desc; | ||
594 | 544 | ||
595 | /* Accept only DONE and NFB interrupts */ | 545 | if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) |
596 | if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK)) | ||
597 | return INTERRUPT_UNKNOWN; | 546 | return INTERRUPT_UNKNOWN; |
598 | 547 | ||
599 | if (done) { | 548 | /* Clear the DONE bit */ |
600 | /* Clear the DONE bit */ | 549 | writel(0, edmac->regs + M2M_INTERRUPT); |
601 | writel(0, edmac->regs + M2M_INTERRUPT); | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Check whether we are done with descriptors or not. This, together | ||
606 | * with DMA channel state, determines action to take in interrupt. | ||
607 | */ | ||
608 | desc = ep93xx_dma_get_active(edmac); | ||
609 | last_done = !desc || desc->txd.cookie; | ||
610 | 550 | ||
611 | /* | 551 | /* Disable interrupts and the channel */ |
612 | * Use M2M DMA Buffer FSM and Control FSM to check current state of | 552 | control = readl(edmac->regs + M2M_CONTROL); |
613 | * DMA channel. Using DONE and NFB bits from channel status register | 553 | control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); |
614 | * or bits from channel interrupt register is not reliable. | 554 | writel(control, edmac->regs + M2M_CONTROL); |
615 | */ | ||
616 | if (!last_done && | ||
617 | (buf_fsm == M2M_STATUS_BUF_NO || | ||
618 | buf_fsm == M2M_STATUS_BUF_ON)) { | ||
619 | /* | ||
620 | * Two buffers are ready for update when Buffer FSM is in | ||
621 | * DMA_NO_BUF state. Only one buffer can be prepared without | ||
622 | * disabling the channel or polling the DONE bit. | ||
623 | * To simplify things, always prepare only one buffer. | ||
624 | */ | ||
625 | if (ep93xx_dma_advance_active(edmac)) { | ||
626 | m2m_fill_desc(edmac); | ||
627 | if (done && !edmac->chan.private) { | ||
628 | /* Software trigger for memcpy channel */ | ||
629 | control = readl(edmac->regs + M2M_CONTROL); | ||
630 | control |= M2M_CONTROL_START; | ||
631 | writel(control, edmac->regs + M2M_CONTROL); | ||
632 | } | ||
633 | return INTERRUPT_NEXT_BUFFER; | ||
634 | } else { | ||
635 | last_done = true; | ||
636 | } | ||
637 | } | ||
638 | 555 | ||
639 | /* | 556 | /* |
640 | * Disable the channel only when Buffer FSM is in DMA_NO_BUF state | 557 | * Since we only get DONE interrupt we have to find out ourselves |
641 | * and Control FSM is in DMA_STALL state. | 558 | * whether there still is something to process. So we try to advance |
559 | * the chain and see whether it succeeds. | ||
642 | */ | 560 | */ |
643 | if (last_done && | 561 | if (ep93xx_dma_advance_active(edmac)) { |
644 | buf_fsm == M2M_STATUS_BUF_NO && | 562 | edmac->edma->hw_submit(edmac); |
645 | ctl_fsm == M2M_STATUS_CTL_STALL) { | 563 | return INTERRUPT_NEXT_BUFFER; |
646 | /* Disable interrupts and the channel */ | ||
647 | control = readl(edmac->regs + M2M_CONTROL); | ||
648 | control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT | ||
649 | | M2M_CONTROL_ENABLE); | ||
650 | writel(control, edmac->regs + M2M_CONTROL); | ||
651 | return INTERRUPT_DONE; | ||
652 | } | 564 | } |
653 | 565 | ||
654 | /* | 566 | return INTERRUPT_DONE; |
655 | * Nothing to do this time. | ||
656 | */ | ||
657 | return INTERRUPT_NEXT_BUFFER; | ||
658 | } | 567 | } |
659 | 568 | ||
660 | /* | 569 | /* |
@@ -759,32 +668,24 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
759 | { | 668 | { |
760 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; | 669 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; |
761 | struct ep93xx_dma_desc *desc, *d; | 670 | struct ep93xx_dma_desc *desc, *d; |
762 | dma_async_tx_callback callback = NULL; | 671 | dma_async_tx_callback callback; |
763 | void *callback_param = NULL; | 672 | void *callback_param; |
764 | LIST_HEAD(list); | 673 | LIST_HEAD(list); |
765 | 674 | ||
766 | spin_lock_irq(&edmac->lock); | 675 | spin_lock_irq(&edmac->lock); |
767 | /* | ||
768 | * If dma_terminate_all() was called before we get to run, the active | ||
769 | * list has become empty. If that happens we aren't supposed to do | ||
770 | * anything more than call ep93xx_dma_advance_work(). | ||
771 | */ | ||
772 | desc = ep93xx_dma_get_active(edmac); | 676 | desc = ep93xx_dma_get_active(edmac); |
773 | if (desc) { | 677 | if (desc->complete) { |
774 | if (desc->complete) { | 678 | edmac->last_completed = desc->txd.cookie; |
775 | /* mark descriptor complete for non cyclic case only */ | 679 | list_splice_init(&edmac->active, &list); |
776 | if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | ||
777 | dma_cookie_complete(&desc->txd); | ||
778 | list_splice_init(&edmac->active, &list); | ||
779 | } | ||
780 | callback = desc->txd.callback; | ||
781 | callback_param = desc->txd.callback_param; | ||
782 | } | 680 | } |
783 | spin_unlock_irq(&edmac->lock); | 681 | spin_unlock_irq(&edmac->lock); |
784 | 682 | ||
785 | /* Pick up the next descriptor from the queue */ | 683 | /* Pick up the next descriptor from the queue */ |
786 | ep93xx_dma_advance_work(edmac); | 684 | ep93xx_dma_advance_work(edmac); |
787 | 685 | ||
686 | callback = desc->txd.callback; | ||
687 | callback_param = desc->txd.callback_param; | ||
688 | |||
788 | /* Now we can release all the chained descriptors */ | 689 | /* Now we can release all the chained descriptors */ |
789 | list_for_each_entry_safe(desc, d, &list, node) { | 690 | list_for_each_entry_safe(desc, d, &list, node) { |
790 | /* | 691 | /* |
@@ -804,22 +705,13 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
804 | static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) | 705 | static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) |
805 | { | 706 | { |
806 | struct ep93xx_dma_chan *edmac = dev_id; | 707 | struct ep93xx_dma_chan *edmac = dev_id; |
807 | struct ep93xx_dma_desc *desc; | ||
808 | irqreturn_t ret = IRQ_HANDLED; | 708 | irqreturn_t ret = IRQ_HANDLED; |
809 | 709 | ||
810 | spin_lock(&edmac->lock); | 710 | spin_lock(&edmac->lock); |
811 | 711 | ||
812 | desc = ep93xx_dma_get_active(edmac); | ||
813 | if (!desc) { | ||
814 | dev_warn(chan2dev(edmac), | ||
815 | "got interrupt while active list is empty\n"); | ||
816 | spin_unlock(&edmac->lock); | ||
817 | return IRQ_NONE; | ||
818 | } | ||
819 | |||
820 | switch (edmac->edma->hw_interrupt(edmac)) { | 712 | switch (edmac->edma->hw_interrupt(edmac)) { |
821 | case INTERRUPT_DONE: | 713 | case INTERRUPT_DONE: |
822 | desc->complete = true; | 714 | ep93xx_dma_get_active(edmac)->complete = true; |
823 | tasklet_schedule(&edmac->tasklet); | 715 | tasklet_schedule(&edmac->tasklet); |
824 | break; | 716 | break; |
825 | 717 | ||
@@ -854,10 +746,17 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
854 | unsigned long flags; | 746 | unsigned long flags; |
855 | 747 | ||
856 | spin_lock_irqsave(&edmac->lock, flags); | 748 | spin_lock_irqsave(&edmac->lock, flags); |
857 | cookie = dma_cookie_assign(tx); | 749 | |
750 | cookie = edmac->chan.cookie; | ||
751 | |||
752 | if (++cookie < 0) | ||
753 | cookie = 1; | ||
858 | 754 | ||
859 | desc = container_of(tx, struct ep93xx_dma_desc, txd); | 755 | desc = container_of(tx, struct ep93xx_dma_desc, txd); |
860 | 756 | ||
757 | edmac->chan.cookie = cookie; | ||
758 | desc->txd.cookie = cookie; | ||
759 | |||
861 | /* | 760 | /* |
862 | * If nothing is currently processed, we push this descriptor | 761 |
863 | * directly to the hardware. Otherwise we put the descriptor | 762 | * directly to the hardware. Otherwise we put the descriptor |
@@ -903,8 +802,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | |||
903 | switch (data->port) { | 802 | switch (data->port) { |
904 | case EP93XX_DMA_SSP: | 803 | case EP93XX_DMA_SSP: |
905 | case EP93XX_DMA_IDE: | 804 | case EP93XX_DMA_IDE: |
906 | if (data->direction != DMA_MEM_TO_DEV && | 805 | if (data->direction != DMA_TO_DEVICE && |
907 | data->direction != DMA_DEV_TO_MEM) | 806 | data->direction != DMA_FROM_DEVICE) |
908 | return -EINVAL; | 807 | return -EINVAL; |
909 | break; | 808 | break; |
910 | default: | 809 | default: |
@@ -925,7 +824,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | |||
925 | goto fail_clk_disable; | 824 | goto fail_clk_disable; |
926 | 825 | ||
927 | spin_lock_irq(&edmac->lock); | 826 | spin_lock_irq(&edmac->lock); |
928 | dma_cookie_init(&edmac->chan); | 827 | edmac->last_completed = 1; |
828 | edmac->chan.cookie = 1; | ||
929 | ret = edmac->edma->hw_setup(edmac); | 829 | ret = edmac->edma->hw_setup(edmac); |
930 | spin_unlock_irq(&edmac->lock); | 830 | spin_unlock_irq(&edmac->lock); |
931 | 831 | ||
@@ -1046,14 +946,13 @@ fail: | |||
1046 | * @sg_len: number of entries in @sgl | 946 | * @sg_len: number of entries in @sgl |
1047 | * @dir: direction of the DMA transfer | 947 |
1048 | * @flags: flags for the descriptor | 948 | * @flags: flags for the descriptor |
1049 | * @context: operation context (ignored) | ||
1050 | * | 949 | * |
1051 | * Returns a valid DMA descriptor or %NULL in case of failure. | 950 | * Returns a valid DMA descriptor or %NULL in case of failure. |
1052 | */ | 951 | */ |
1053 | static struct dma_async_tx_descriptor * | 952 | static struct dma_async_tx_descriptor * |
1054 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 953 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
1055 | unsigned int sg_len, enum dma_transfer_direction dir, | 954 | unsigned int sg_len, enum dma_data_direction dir, |
1056 | unsigned long flags, void *context) | 955 | unsigned long flags) |
1057 | { | 956 | { |
1058 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 957 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1059 | struct ep93xx_dma_desc *desc, *first; | 958 | struct ep93xx_dma_desc *desc, *first; |
@@ -1088,7 +987,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1088 | goto fail; | 987 | goto fail; |
1089 | } | 988 | } |
1090 | 989 | ||
1091 | if (dir == DMA_MEM_TO_DEV) { | 990 | if (dir == DMA_TO_DEVICE) { |
1092 | desc->src_addr = sg_dma_address(sg); | 991 | desc->src_addr = sg_dma_address(sg); |
1093 | desc->dst_addr = edmac->runtime_addr; | 992 | desc->dst_addr = edmac->runtime_addr; |
1094 | } else { | 993 | } else { |
@@ -1118,10 +1017,8 @@ fail: | |||
1118 | * @chan: channel | 1017 | * @chan: channel |
1119 | * @dma_addr: DMA mapped address of the buffer | 1018 | * @dma_addr: DMA mapped address of the buffer |
1120 | * @buf_len: length of the buffer (in bytes) | 1019 | * @buf_len: length of the buffer (in bytes) |
1121 | * @period_len: length of a single period | 1020 | * @period_len: lenght of a single period |
1122 | * @dir: direction of the operation | 1021 | * @dir: direction of the operation |
1123 | * @flags: tx descriptor status flags | ||
1124 | * @context: operation context (ignored) | ||
1125 | * | 1022 | * |
1126 | * Prepares a descriptor for cyclic DMA operation. This means that once the | 1023 | * Prepares a descriptor for cyclic DMA operation. This means that once the |
1127 | * descriptor is submitted, we will be submitting in a @period_len sized | 1024 | * descriptor is submitted, we will be submitting in a @period_len sized |
@@ -1134,8 +1031,7 @@ fail: | |||
1134 | static struct dma_async_tx_descriptor * | 1031 | static struct dma_async_tx_descriptor * |
1135 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 1032 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
1136 | size_t buf_len, size_t period_len, | 1033 | size_t buf_len, size_t period_len, |
1137 | enum dma_transfer_direction dir, unsigned long flags, | 1034 | enum dma_data_direction dir) |
1138 | void *context) | ||
1139 | { | 1035 | { |
1140 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1036 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1141 | struct ep93xx_dma_desc *desc, *first; | 1037 | struct ep93xx_dma_desc *desc, *first; |
@@ -1168,7 +1064,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
1168 | goto fail; | 1064 | goto fail; |
1169 | } | 1065 | } |
1170 | 1066 | ||
1171 | if (dir == DMA_MEM_TO_DEV) { | 1067 | if (dir == DMA_TO_DEVICE) { |
1172 | desc->src_addr = dma_addr + offset; | 1068 | desc->src_addr = dma_addr + offset; |
1173 | desc->dst_addr = edmac->runtime_addr; | 1069 | desc->dst_addr = edmac->runtime_addr; |
1174 | } else { | 1070 | } else { |
@@ -1236,12 +1132,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | |||
1236 | return -EINVAL; | 1132 | return -EINVAL; |
1237 | 1133 | ||
1238 | switch (config->direction) { | 1134 | switch (config->direction) { |
1239 | case DMA_DEV_TO_MEM: | 1135 | case DMA_FROM_DEVICE: |
1240 | width = config->src_addr_width; | 1136 | width = config->src_addr_width; |
1241 | addr = config->src_addr; | 1137 | addr = config->src_addr; |
1242 | break; | 1138 | break; |
1243 | 1139 | ||
1244 | case DMA_MEM_TO_DEV: | 1140 | case DMA_TO_DEVICE: |
1245 | width = config->dst_addr_width; | 1141 | width = config->dst_addr_width; |
1246 | addr = config->dst_addr; | 1142 | addr = config->dst_addr; |
1247 | break; | 1143 | break; |
@@ -1315,13 +1211,18 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, | |||
1315 | struct dma_tx_state *state) | 1211 | struct dma_tx_state *state) |
1316 | { | 1212 | { |
1317 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1213 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1214 | dma_cookie_t last_used, last_completed; | ||
1318 | enum dma_status ret; | 1215 | enum dma_status ret; |
1319 | unsigned long flags; | 1216 | unsigned long flags; |
1320 | 1217 | ||
1321 | spin_lock_irqsave(&edmac->lock, flags); | 1218 | spin_lock_irqsave(&edmac->lock, flags); |
1322 | ret = dma_cookie_status(chan, cookie, state); | 1219 | last_used = chan->cookie; |
1220 | last_completed = edmac->last_completed; | ||
1323 | spin_unlock_irqrestore(&edmac->lock, flags); | 1221 | spin_unlock_irqrestore(&edmac->lock, flags); |
1324 | 1222 | ||
1223 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
1224 | dma_set_tx_state(state, last_completed, last_used, 0); | ||
1225 | |||
1325 | return ret; | 1226 | return ret; |
1326 | } | 1227 | } |
1327 | 1228 | ||
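The cookie bookkeeping reinstated on the right-hand side of this file works on a simple window; a sketch with made-up values:

	/* Suppose chan->cookie == 9 (last cookie issued) and
	 * edmac->last_completed == 5 (last cookie retired):
	 *
	 *   dma_async_is_complete(4, 5, 9)  -> DMA_SUCCESS      (already finished)
	 *   dma_async_is_complete(7, 5, 9)  -> DMA_IN_PROGRESS  (still pending)
	 *
	 * dma_cookie_status() on the left-hand side performs the same comparison
	 * and fills in the dma_tx_state itself. */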
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 4fc2980556a..8a781540590 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/dmapool.h> | 35 | #include <linux/dmapool.h> |
36 | #include <linux/of_platform.h> | 36 | #include <linux/of_platform.h> |
37 | 37 | ||
38 | #include "dmaengine.h" | ||
39 | #include "fsldma.h" | 38 | #include "fsldma.h" |
40 | 39 | ||
41 | #define chan_dbg(chan, fmt, arg...) \ | 40 | #define chan_dbg(chan, fmt, arg...) \ |
@@ -414,10 +413,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
414 | * assign cookies to all of the software descriptors | 413 | * assign cookies to all of the software descriptors |
415 | * that make up this transaction | 414 | * that make up this transaction |
416 | */ | 415 | */ |
416 | cookie = chan->common.cookie; | ||
417 | list_for_each_entry(child, &desc->tx_list, node) { | 417 | list_for_each_entry(child, &desc->tx_list, node) { |
418 | cookie = dma_cookie_assign(&child->async_tx); | 418 | cookie++; |
419 | if (cookie < DMA_MIN_COOKIE) | ||
420 | cookie = DMA_MIN_COOKIE; | ||
421 | |||
422 | child->async_tx.cookie = cookie; | ||
419 | } | 423 | } |
420 | 424 | ||
425 | chan->common.cookie = cookie; | ||
426 | |||
421 | /* put this transaction onto the tail of the pending queue */ | 427 | /* put this transaction onto the tail of the pending queue */ |
422 | append_ld_queue(chan, desc); | 428 | append_ld_queue(chan, desc); |
423 | 429 | ||
@@ -759,7 +765,6 @@ fail: | |||
759 | * @sg_len: number of entries in @scatterlist | 765 | * @sg_len: number of entries in @scatterlist |
760 | * @direction: DMA direction | 766 | * @direction: DMA direction |
761 | * @flags: DMAEngine flags | 767 | * @flags: DMAEngine flags |
762 | * @context: transaction context (ignored) | ||
763 | * | 768 | * |
764 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | 769 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the |
765 | * DMA_SLAVE API, this gets the device-specific information from the | 770 | * DMA_SLAVE API, this gets the device-specific information from the |
@@ -767,8 +772,7 @@ fail: | |||
767 | */ | 772 | */ |
768 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | 773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
769 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | 774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
770 | enum dma_transfer_direction direction, unsigned long flags, | 775 | enum dma_data_direction direction, unsigned long flags) |
771 | void *context) | ||
772 | { | 776 | { |
773 | /* | 777 | /* |
774 | * This operation is not supported on the Freescale DMA controller | 778 | * This operation is not supported on the Freescale DMA controller |
@@ -815,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
815 | return -ENXIO; | 819 | return -ENXIO; |
816 | 820 | ||
817 | /* we set the controller burst size depending on direction */ | 821 | /* we set the controller burst size depending on direction */ |
818 | if (config->direction == DMA_MEM_TO_DEV) | 822 | if (config->direction == DMA_TO_DEVICE) |
819 | size = config->dst_addr_width * config->dst_maxburst; | 823 | size = config->dst_addr_width * config->dst_maxburst; |
820 | else | 824 | else |
821 | size = config->src_addr_width * config->src_maxburst; | 825 | size = config->src_addr_width * config->src_maxburst; |
@@ -980,14 +984,19 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
980 | struct dma_tx_state *txstate) | 984 | struct dma_tx_state *txstate) |
981 | { | 985 | { |
982 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 986 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
983 | enum dma_status ret; | 987 | dma_cookie_t last_complete; |
988 | dma_cookie_t last_used; | ||
984 | unsigned long flags; | 989 | unsigned long flags; |
985 | 990 | ||
986 | spin_lock_irqsave(&chan->desc_lock, flags); | 991 | spin_lock_irqsave(&chan->desc_lock, flags); |
987 | ret = dma_cookie_status(dchan, cookie, txstate); | 992 | |
993 | last_complete = chan->completed_cookie; | ||
994 | last_used = dchan->cookie; | ||
995 | |||
988 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 996 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
989 | 997 | ||
990 | return ret; | 998 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
999 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
991 | } | 1000 | } |
992 | 1001 | ||
993 | /*----------------------------------------------------------------------------*/ | 1002 | /*----------------------------------------------------------------------------*/ |
@@ -1015,7 +1024,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1015 | /* | 1024 | /* |
1016 | * Programming Error | 1025 | * Programming Error |
1017 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will | 1026 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will |
1018 | * trigger a PE interrupt. | 1027 | * triger a PE interrupt. |
1019 | */ | 1028 | */ |
1020 | if (stat & FSL_DMA_SR_PE) { | 1029 | if (stat & FSL_DMA_SR_PE) { |
1021 | chan_dbg(chan, "irq: Programming Error INT\n"); | 1030 | chan_dbg(chan, "irq: Programming Error INT\n"); |
@@ -1078,8 +1087,8 @@ static void dma_do_tasklet(unsigned long data) | |||
1078 | 1087 | ||
1079 | desc = to_fsl_desc(chan->ld_running.prev); | 1088 | desc = to_fsl_desc(chan->ld_running.prev); |
1080 | cookie = desc->async_tx.cookie; | 1089 | cookie = desc->async_tx.cookie; |
1081 | dma_cookie_complete(&desc->async_tx); | ||
1082 | 1090 | ||
1091 | chan->completed_cookie = cookie; | ||
1083 | chan_dbg(chan, "completed_cookie=%d\n", cookie); | 1092 | chan_dbg(chan, "completed_cookie=%d\n", cookie); |
1084 | } | 1093 | } |
1085 | 1094 | ||
@@ -1221,7 +1230,7 @@ out_unwind: | |||
1221 | /* OpenFirmware Subsystem */ | 1230 | /* OpenFirmware Subsystem */ |
1222 | /*----------------------------------------------------------------------------*/ | 1231 | /*----------------------------------------------------------------------------*/ |
1223 | 1232 | ||
1224 | static int fsl_dma_chan_probe(struct fsldma_device *fdev, | 1233 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, |
1225 | struct device_node *node, u32 feature, const char *compatible) | 1234 | struct device_node *node, u32 feature, const char *compatible) |
1226 | { | 1235 | { |
1227 | struct fsldma_chan *chan; | 1236 | struct fsldma_chan *chan; |
@@ -1294,7 +1303,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1294 | chan->idle = true; | 1303 | chan->idle = true; |
1295 | 1304 | ||
1296 | chan->common.device = &fdev->common; | 1305 | chan->common.device = &fdev->common; |
1297 | dma_cookie_init(&chan->common); | ||
1298 | 1306 | ||
1299 | /* find the IRQ line, if it exists in the device tree */ | 1307 | /* find the IRQ line, if it exists in the device tree */ |
1300 | chan->irq = irq_of_parse_and_map(node, 0); | 1308 | chan->irq = irq_of_parse_and_map(node, 0); |
@@ -1324,7 +1332,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan) | |||
1324 | kfree(chan); | 1332 | kfree(chan); |
1325 | } | 1333 | } |
1326 | 1334 | ||
1327 | static int fsldma_of_probe(struct platform_device *op) | 1335 | static int __devinit fsldma_of_probe(struct platform_device *op) |
1328 | { | 1336 | { |
1329 | struct fsldma_device *fdev; | 1337 | struct fsldma_device *fdev; |
1330 | struct device_node *child; | 1338 | struct device_node *child; |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc7..9cb5aa57c67 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -137,6 +137,7 @@ struct fsldma_device { | |||
137 | struct fsldma_chan { | 137 | struct fsldma_chan { |
138 | char name[8]; /* Channel name */ | 138 | char name[8]; /* Channel name */ |
139 | struct fsldma_chan_regs __iomem *regs; | 139 | struct fsldma_chan_regs __iomem *regs; |
140 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
140 | spinlock_t desc_lock; /* Descriptor operation lock */ | 141 | spinlock_t desc_lock; /* Descriptor operation lock */ |
141 | struct list_head ld_pending; /* Link descriptors queue */ | 142 | struct list_head ld_pending; /* Link descriptors queue */ |
142 | struct list_head ld_running; /* Link descriptors queue */ | 143 | struct list_head ld_running; /* Link descriptors queue */ |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index dbf0e6f8de8..d99f71c356b 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -5,7 +5,6 @@ | |||
5 | * found on i.MX1/21/27 | 5 | * found on i.MX1/21/27 |
6 | * | 6 | * |
7 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> | 7 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> |
8 | * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com> | ||
9 | * | 8 | * |
10 | * The code contained herein is licensed under the GNU General Public | 9 | * The code contained herein is licensed under the GNU General Public |
11 | * License. You may obtain a copy of the GNU General Public License | 10 | * License. You may obtain a copy of the GNU General Public License |
@@ -23,623 +22,73 @@ | |||
23 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
24 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
25 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
26 | #include <linux/clk.h> | ||
27 | #include <linux/dmaengine.h> | 25 | #include <linux/dmaengine.h> |
28 | #include <linux/module.h> | ||
29 | 26 | ||
30 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
31 | #include <linux/platform_data/dma-imx.h> | 28 | #include <mach/dma-v1.h> |
32 | 29 | #include <mach/hardware.h> | |
33 | #include "dmaengine.h" | ||
34 | #define IMXDMA_MAX_CHAN_DESCRIPTORS 16 | ||
35 | #define IMX_DMA_CHANNELS 16 | ||
36 | |||
37 | #define IMX_DMA_2D_SLOTS 2 | ||
38 | #define IMX_DMA_2D_SLOT_A 0 | ||
39 | #define IMX_DMA_2D_SLOT_B 1 | ||
40 | |||
41 | #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) | ||
42 | #define IMX_DMA_MEMSIZE_32 (0 << 4) | ||
43 | #define IMX_DMA_MEMSIZE_8 (1 << 4) | ||
44 | #define IMX_DMA_MEMSIZE_16 (2 << 4) | ||
45 | #define IMX_DMA_TYPE_LINEAR (0 << 10) | ||
46 | #define IMX_DMA_TYPE_2D (1 << 10) | ||
47 | #define IMX_DMA_TYPE_FIFO (2 << 10) | ||
48 | |||
49 | #define IMX_DMA_ERR_BURST (1 << 0) | ||
50 | #define IMX_DMA_ERR_REQUEST (1 << 1) | ||
51 | #define IMX_DMA_ERR_TRANSFER (1 << 2) | ||
52 | #define IMX_DMA_ERR_BUFFER (1 << 3) | ||
53 | #define IMX_DMA_ERR_TIMEOUT (1 << 4) | ||
54 | |||
55 | #define DMA_DCR 0x00 /* Control Register */ | ||
56 | #define DMA_DISR 0x04 /* Interrupt status Register */ | ||
57 | #define DMA_DIMR 0x08 /* Interrupt mask Register */ | ||
58 | #define DMA_DBTOSR 0x0c /* Burst timeout status Register */ | ||
59 | #define DMA_DRTOSR 0x10 /* Request timeout Register */ | ||
60 | #define DMA_DSESR 0x14 /* Transfer Error Status Register */ | ||
61 | #define DMA_DBOSR 0x18 /* Buffer overflow status Register */ | ||
62 | #define DMA_DBTOCR 0x1c /* Burst timeout control Register */ | ||
63 | #define DMA_WSRA 0x40 /* W-Size Register A */ | ||
64 | #define DMA_XSRA 0x44 /* X-Size Register A */ | ||
65 | #define DMA_YSRA 0x48 /* Y-Size Register A */ | ||
66 | #define DMA_WSRB 0x4c /* W-Size Register B */ | ||
67 | #define DMA_XSRB 0x50 /* X-Size Register B */ | ||
68 | #define DMA_YSRB 0x54 /* Y-Size Register B */ | ||
69 | #define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ | ||
70 | #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ | ||
71 | #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ | ||
72 | #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ | ||
73 | #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ | ||
74 | #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ | ||
75 | #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ | ||
76 | #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ | ||
77 | #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ | ||
78 | |||
79 | #define DCR_DRST (1<<1) | ||
80 | #define DCR_DEN (1<<0) | ||
81 | #define DBTOCR_EN (1<<15) | ||
82 | #define DBTOCR_CNT(x) ((x) & 0x7fff) | ||
83 | #define CNTR_CNT(x) ((x) & 0xffffff) | ||
84 | #define CCR_ACRPT (1<<14) | ||
85 | #define CCR_DMOD_LINEAR (0x0 << 12) | ||
86 | #define CCR_DMOD_2D (0x1 << 12) | ||
87 | #define CCR_DMOD_FIFO (0x2 << 12) | ||
88 | #define CCR_DMOD_EOBFIFO (0x3 << 12) | ||
89 | #define CCR_SMOD_LINEAR (0x0 << 10) | ||
90 | #define CCR_SMOD_2D (0x1 << 10) | ||
91 | #define CCR_SMOD_FIFO (0x2 << 10) | ||
92 | #define CCR_SMOD_EOBFIFO (0x3 << 10) | ||
93 | #define CCR_MDIR_DEC (1<<9) | ||
94 | #define CCR_MSEL_B (1<<8) | ||
95 | #define CCR_DSIZ_32 (0x0 << 6) | ||
96 | #define CCR_DSIZ_8 (0x1 << 6) | ||
97 | #define CCR_DSIZ_16 (0x2 << 6) | ||
98 | #define CCR_SSIZ_32 (0x0 << 4) | ||
99 | #define CCR_SSIZ_8 (0x1 << 4) | ||
100 | #define CCR_SSIZ_16 (0x2 << 4) | ||
101 | #define CCR_REN (1<<3) | ||
102 | #define CCR_RPT (1<<2) | ||
103 | #define CCR_FRC (1<<1) | ||
104 | #define CCR_CEN (1<<0) | ||
105 | #define RTOR_EN (1<<15) | ||
106 | #define RTOR_CLK (1<<14) | ||
107 | #define RTOR_PSC (1<<13) | ||
108 | |||
109 | enum imxdma_prep_type { | ||
110 | IMXDMA_DESC_MEMCPY, | ||
111 | IMXDMA_DESC_INTERLEAVED, | ||
112 | IMXDMA_DESC_SLAVE_SG, | ||
113 | IMXDMA_DESC_CYCLIC, | ||
114 | }; | ||
115 | |||
116 | struct imx_dma_2d_config { | ||
117 | u16 xsr; | ||
118 | u16 ysr; | ||
119 | u16 wsr; | ||
120 | int count; | ||
121 | }; | ||
122 | |||
123 | struct imxdma_desc { | ||
124 | struct list_head node; | ||
125 | struct dma_async_tx_descriptor desc; | ||
126 | enum dma_status status; | ||
127 | dma_addr_t src; | ||
128 | dma_addr_t dest; | ||
129 | size_t len; | ||
130 | enum dma_transfer_direction direction; | ||
131 | enum imxdma_prep_type type; | ||
132 | /* For memcpy and interleaved */ | ||
133 | unsigned int config_port; | ||
134 | unsigned int config_mem; | ||
135 | /* For interleaved transfers */ | ||
136 | unsigned int x; | ||
137 | unsigned int y; | ||
138 | unsigned int w; | ||
139 | /* For slave sg and cyclic */ | ||
140 | struct scatterlist *sg; | ||
141 | unsigned int sgcount; | ||
142 | }; | ||
143 | 30 | ||
144 | struct imxdma_channel { | 31 | struct imxdma_channel { |
145 | int hw_chaining; | ||
146 | struct timer_list watchdog; | ||
147 | struct imxdma_engine *imxdma; | 32 | struct imxdma_engine *imxdma; |
148 | unsigned int channel; | 33 | unsigned int channel; |
34 | unsigned int imxdma_channel; | ||
149 | 35 | ||
150 | struct tasklet_struct dma_tasklet; | ||
151 | struct list_head ld_free; | ||
152 | struct list_head ld_queue; | ||
153 | struct list_head ld_active; | ||
154 | int descs_allocated; | ||
155 | enum dma_slave_buswidth word_size; | 36 | enum dma_slave_buswidth word_size; |
156 | dma_addr_t per_address; | 37 | dma_addr_t per_address; |
157 | u32 watermark_level; | 38 | u32 watermark_level; |
158 | struct dma_chan chan; | 39 | struct dma_chan chan; |
40 | spinlock_t lock; | ||
159 | struct dma_async_tx_descriptor desc; | 41 | struct dma_async_tx_descriptor desc; |
42 | dma_cookie_t last_completed; | ||
160 | enum dma_status status; | 43 | enum dma_status status; |
161 | int dma_request; | 44 | int dma_request; |
162 | struct scatterlist *sg_list; | 45 | struct scatterlist *sg_list; |
163 | u32 ccr_from_device; | ||
164 | u32 ccr_to_device; | ||
165 | bool enabled_2d; | ||
166 | int slot_2d; | ||
167 | }; | 46 | }; |
168 | 47 | ||
169 | enum imx_dma_type { | 48 | #define MAX_DMA_CHANNELS 8 |
170 | IMX1_DMA, | ||
171 | IMX21_DMA, | ||
172 | IMX27_DMA, | ||
173 | }; | ||
174 | 49 | ||
175 | struct imxdma_engine { | 50 | struct imxdma_engine { |
176 | struct device *dev; | 51 | struct device *dev; |
177 | struct device_dma_parameters dma_parms; | 52 | struct device_dma_parameters dma_parms; |
178 | struct dma_device dma_device; | 53 | struct dma_device dma_device; |
179 | void __iomem *base; | 54 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; |
180 | struct clk *dma_ahb; | ||
181 | struct clk *dma_ipg; | ||
182 | spinlock_t lock; | ||
183 | struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; | ||
184 | struct imxdma_channel channel[IMX_DMA_CHANNELS]; | ||
185 | enum imx_dma_type devtype; | ||
186 | }; | 55 | }; |
187 | 56 | ||
188 | static struct platform_device_id imx_dma_devtype[] = { | ||
189 | { | ||
190 | .name = "imx1-dma", | ||
191 | .driver_data = IMX1_DMA, | ||
192 | }, { | ||
193 | .name = "imx21-dma", | ||
194 | .driver_data = IMX21_DMA, | ||
195 | }, { | ||
196 | .name = "imx27-dma", | ||
197 | .driver_data = IMX27_DMA, | ||
198 | }, { | ||
199 | /* sentinel */ | ||
200 | } | ||
201 | }; | ||
202 | MODULE_DEVICE_TABLE(platform, imx_dma_devtype); | ||
203 | |||
204 | static inline int is_imx1_dma(struct imxdma_engine *imxdma) | ||
205 | { | ||
206 | return imxdma->devtype == IMX1_DMA; | ||
207 | } | ||
208 | |||
209 | static inline int is_imx21_dma(struct imxdma_engine *imxdma) | ||
210 | { | ||
211 | return imxdma->devtype == IMX21_DMA; | ||
212 | } | ||
213 | |||
214 | static inline int is_imx27_dma(struct imxdma_engine *imxdma) | ||
215 | { | ||
216 | return imxdma->devtype == IMX27_DMA; | ||
217 | } | ||
218 | |||
219 | static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) | 57 | static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) |
220 | { | 58 | { |
221 | return container_of(chan, struct imxdma_channel, chan); | 59 | return container_of(chan, struct imxdma_channel, chan); |
222 | } | 60 | } |
223 | 61 | ||
224 | static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) | 62 | static void imxdma_handle(struct imxdma_channel *imxdmac) |
225 | { | ||
226 | struct imxdma_desc *desc; | ||
227 | |||
228 | if (!list_empty(&imxdmac->ld_active)) { | ||
229 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, | ||
230 | node); | ||
231 | if (desc->type == IMXDMA_DESC_CYCLIC) | ||
232 | return true; | ||
233 | } | ||
234 | return false; | ||
235 | } | ||
236 | |||
237 | |||
238 | |||
239 | static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val, | ||
240 | unsigned offset) | ||
241 | { | ||
242 | __raw_writel(val, imxdma->base + offset); | ||
243 | } | ||
244 | |||
245 | static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset) | ||
246 | { | ||
247 | return __raw_readl(imxdma->base + offset); | ||
248 | } | ||
249 | |||
250 | static int imxdma_hw_chain(struct imxdma_channel *imxdmac) | ||
251 | { | ||
252 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
253 | |||
254 | if (is_imx27_dma(imxdma)) | ||
255 | return imxdmac->hw_chaining; | ||
256 | else | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation | ||
262 | */ | ||
263 | static inline int imxdma_sg_next(struct imxdma_desc *d) | ||
264 | { | ||
265 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | ||
266 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
267 | struct scatterlist *sg = d->sg; | ||
268 | unsigned long now; | ||
269 | |||
270 | now = min(d->len, sg_dma_len(sg)); | ||
271 | if (d->len != IMX_DMA_LENGTH_LOOP) | ||
272 | d->len -= now; | ||
273 | |||
274 | if (d->direction == DMA_DEV_TO_MEM) | ||
275 | imx_dmav1_writel(imxdma, sg->dma_address, | ||
276 | DMA_DAR(imxdmac->channel)); | ||
277 | else | ||
278 | imx_dmav1_writel(imxdma, sg->dma_address, | ||
279 | DMA_SAR(imxdmac->channel)); | ||
280 | |||
281 | imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel)); | ||
282 | |||
283 | dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, " | ||
284 | "size 0x%08x\n", __func__, imxdmac->channel, | ||
285 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), | ||
286 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), | ||
287 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); | ||
288 | |||
289 | return now; | ||
290 | } | ||
291 | |||
292 | static void imxdma_enable_hw(struct imxdma_desc *d) | ||
293 | { | ||
294 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | ||
295 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
296 | int channel = imxdmac->channel; | ||
297 | unsigned long flags; | ||
298 | |||
299 | dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); | ||
300 | |||
301 | local_irq_save(flags); | ||
302 | |||
303 | imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); | ||
304 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) & | ||
305 | ~(1 << channel), DMA_DIMR); | ||
306 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) | | ||
307 | CCR_CEN | CCR_ACRPT, DMA_CCR(channel)); | ||
308 | |||
309 | if (!is_imx1_dma(imxdma) && | ||
310 | d->sg && imxdma_hw_chain(imxdmac)) { | ||
311 | d->sg = sg_next(d->sg); | ||
312 | if (d->sg) { | ||
313 | u32 tmp; | ||
314 | imxdma_sg_next(d); | ||
315 | tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel)); | ||
316 | imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT, | ||
317 | DMA_CCR(channel)); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | local_irq_restore(flags); | ||
322 | } | ||
323 | |||
324 | static void imxdma_disable_hw(struct imxdma_channel *imxdmac) | ||
325 | { | 63 | { |
326 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 64 | if (imxdmac->desc.callback) |
327 | int channel = imxdmac->channel; | 65 | imxdmac->desc.callback(imxdmac->desc.callback_param); |
328 | unsigned long flags; | 66 | imxdmac->last_completed = imxdmac->desc.cookie; |
329 | |||
330 | dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); | ||
331 | |||
332 | if (imxdma_hw_chain(imxdmac)) | ||
333 | del_timer(&imxdmac->watchdog); | ||
334 | |||
335 | local_irq_save(flags); | ||
336 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | | ||
337 | (1 << channel), DMA_DIMR); | ||
338 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) & | ||
339 | ~CCR_CEN, DMA_CCR(channel)); | ||
340 | imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); | ||
341 | local_irq_restore(flags); | ||
342 | } | 67 | } |
343 | 68 | ||
344 | static void imxdma_watchdog(unsigned long data) | 69 | static void imxdma_irq_handler(int channel, void *data) |
345 | { | 70 | { |
346 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; | 71 | struct imxdma_channel *imxdmac = data; |
347 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
348 | int channel = imxdmac->channel; | ||
349 | |||
350 | imx_dmav1_writel(imxdma, 0, DMA_CCR(channel)); | ||
351 | 72 | ||
352 | /* Tasklet watchdog error handler */ | 73 | imxdmac->status = DMA_SUCCESS; |
353 | tasklet_schedule(&imxdmac->dma_tasklet); | 74 | imxdma_handle(imxdmac); |
354 | dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n", | ||
355 | imxdmac->channel); | ||
356 | } | 75 | } |
357 | 76 | ||
358 | static irqreturn_t imxdma_err_handler(int irq, void *dev_id) | 77 | static void imxdma_err_handler(int channel, void *data, int error) |
359 | { | 78 | { |
360 | struct imxdma_engine *imxdma = dev_id; | 79 | struct imxdma_channel *imxdmac = data; |
361 | unsigned int err_mask; | ||
362 | int i, disr; | ||
363 | int errcode; | ||
364 | 80 | ||
365 | disr = imx_dmav1_readl(imxdma, DMA_DISR); | 81 | imxdmac->status = DMA_ERROR; |
366 | 82 | imxdma_handle(imxdmac); | |
367 | err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) | | ||
368 | imx_dmav1_readl(imxdma, DMA_DRTOSR) | | ||
369 | imx_dmav1_readl(imxdma, DMA_DSESR) | | ||
370 | imx_dmav1_readl(imxdma, DMA_DBOSR); | ||
371 | |||
372 | if (!err_mask) | ||
373 | return IRQ_HANDLED; | ||
374 | |||
375 | imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR); | ||
376 | |||
377 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
378 | if (!(err_mask & (1 << i))) | ||
379 | continue; | ||
380 | errcode = 0; | ||
381 | |||
382 | if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) { | ||
383 | imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR); | ||
384 | errcode |= IMX_DMA_ERR_BURST; | ||
385 | } | ||
386 | if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) { | ||
387 | imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR); | ||
388 | errcode |= IMX_DMA_ERR_REQUEST; | ||
389 | } | ||
390 | if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) { | ||
391 | imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR); | ||
392 | errcode |= IMX_DMA_ERR_TRANSFER; | ||
393 | } | ||
394 | if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) { | ||
395 | imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR); | ||
396 | errcode |= IMX_DMA_ERR_BUFFER; | ||
397 | } | ||
398 | /* Tasklet error handler */ | ||
399 | tasklet_schedule(&imxdma->channel[i].dma_tasklet); | ||
400 | |||
401 | printk(KERN_WARNING | ||
402 | "DMA timeout on channel %d -%s%s%s%s\n", i, | ||
403 | errcode & IMX_DMA_ERR_BURST ? " burst" : "", | ||
404 | errcode & IMX_DMA_ERR_REQUEST ? " request" : "", | ||
405 | errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | ||
406 | errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | ||
407 | } | ||
408 | return IRQ_HANDLED; | ||
409 | } | 83 | } |
410 | 84 | ||
411 | static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | 85 | static void imxdma_progression(int channel, void *data, |
86 | struct scatterlist *sg) | ||
412 | { | 87 | { |
413 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 88 | struct imxdma_channel *imxdmac = data; |
414 | int chno = imxdmac->channel; | ||
415 | struct imxdma_desc *desc; | ||
416 | |||
417 | spin_lock(&imxdma->lock); | ||
418 | if (list_empty(&imxdmac->ld_active)) { | ||
419 | spin_unlock(&imxdma->lock); | ||
420 | goto out; | ||
421 | } | ||
422 | |||
423 | desc = list_first_entry(&imxdmac->ld_active, | ||
424 | struct imxdma_desc, | ||
425 | node); | ||
426 | spin_unlock(&imxdma->lock); | ||
427 | |||
428 | if (desc->sg) { | ||
429 | u32 tmp; | ||
430 | desc->sg = sg_next(desc->sg); | ||
431 | 89 | ||
432 | if (desc->sg) { | 90 | imxdmac->status = DMA_SUCCESS; |
433 | imxdma_sg_next(desc); | 91 | imxdma_handle(imxdmac); |
434 | |||
435 | tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno)); | ||
436 | |||
437 | if (imxdma_hw_chain(imxdmac)) { | ||
438 | /* FIXME: The timeout should probably be | ||
439 | * configurable | ||
440 | */ | ||
441 | mod_timer(&imxdmac->watchdog, | ||
442 | jiffies + msecs_to_jiffies(500)); | ||
443 | |||
444 | tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; | ||
445 | imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); | ||
446 | } else { | ||
447 | imx_dmav1_writel(imxdma, tmp & ~CCR_CEN, | ||
448 | DMA_CCR(chno)); | ||
449 | tmp |= CCR_CEN; | ||
450 | } | ||
451 | |||
452 | imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); | ||
453 | |||
454 | if (imxdma_chan_is_doing_cyclic(imxdmac)) | ||
455 | /* Tasklet progression */ | ||
456 | tasklet_schedule(&imxdmac->dma_tasklet); | ||
457 | |||
458 | return; | ||
459 | } | ||
460 | |||
461 | if (imxdma_hw_chain(imxdmac)) { | ||
462 | del_timer(&imxdmac->watchdog); | ||
463 | return; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | out: | ||
468 | imx_dmav1_writel(imxdma, 0, DMA_CCR(chno)); | ||
469 | /* Tasklet irq */ | ||
470 | tasklet_schedule(&imxdmac->dma_tasklet); | ||
471 | } | ||
472 | |||
473 | static irqreturn_t dma_irq_handler(int irq, void *dev_id) | ||
474 | { | ||
475 | struct imxdma_engine *imxdma = dev_id; | ||
476 | int i, disr; | ||
477 | |||
478 | if (!is_imx1_dma(imxdma)) | ||
479 | imxdma_err_handler(irq, dev_id); | ||
480 | |||
481 | disr = imx_dmav1_readl(imxdma, DMA_DISR); | ||
482 | |||
483 | dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr); | ||
484 | |||
485 | imx_dmav1_writel(imxdma, disr, DMA_DISR); | ||
486 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
487 | if (disr & (1 << i)) | ||
488 | dma_irq_handle_channel(&imxdma->channel[i]); | ||
489 | } | ||
490 | |||
491 | return IRQ_HANDLED; | ||
492 | } | ||
493 | |||
494 | static int imxdma_xfer_desc(struct imxdma_desc *d) | ||
495 | { | ||
496 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | ||
497 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
498 | unsigned long flags; | ||
499 | int slot = -1; | ||
500 | int i; | ||
501 | |||
502 | /* Configure and enable */ | ||
503 | switch (d->type) { | ||
504 | case IMXDMA_DESC_INTERLEAVED: | ||
505 | /* Try to get a free 2D slot */ | ||
506 | spin_lock_irqsave(&imxdma->lock, flags); | ||
507 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { | ||
508 | if ((imxdma->slots_2d[i].count > 0) && | ||
509 | ((imxdma->slots_2d[i].xsr != d->x) || | ||
510 | (imxdma->slots_2d[i].ysr != d->y) || | ||
511 | (imxdma->slots_2d[i].wsr != d->w))) | ||
512 | continue; | ||
513 | slot = i; | ||
514 | break; | ||
515 | } | ||
516 | if (slot < 0) { | ||
517 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
518 | return -EBUSY; | ||
519 | } | ||
520 | |||
521 | imxdma->slots_2d[slot].xsr = d->x; | ||
522 | imxdma->slots_2d[slot].ysr = d->y; | ||
523 | imxdma->slots_2d[slot].wsr = d->w; | ||
524 | imxdma->slots_2d[slot].count++; | ||
525 | |||
526 | imxdmac->slot_2d = slot; | ||
527 | imxdmac->enabled_2d = true; | ||
528 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
529 | |||
530 | if (slot == IMX_DMA_2D_SLOT_A) { | ||
531 | d->config_mem &= ~CCR_MSEL_B; | ||
532 | d->config_port &= ~CCR_MSEL_B; | ||
533 | imx_dmav1_writel(imxdma, d->x, DMA_XSRA); | ||
534 | imx_dmav1_writel(imxdma, d->y, DMA_YSRA); | ||
535 | imx_dmav1_writel(imxdma, d->w, DMA_WSRA); | ||
536 | } else { | ||
537 | d->config_mem |= CCR_MSEL_B; | ||
538 | d->config_port |= CCR_MSEL_B; | ||
539 | imx_dmav1_writel(imxdma, d->x, DMA_XSRB); | ||
540 | imx_dmav1_writel(imxdma, d->y, DMA_YSRB); | ||
541 | imx_dmav1_writel(imxdma, d->w, DMA_WSRB); | ||
542 | } | ||
543 | /* | ||
544 | * We fall-through here intentionally, since a 2D transfer is | ||
545 | * similar to MEMCPY just adding the 2D slot configuration. | ||
546 | */ | ||
547 | case IMXDMA_DESC_MEMCPY: | ||
548 | imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); | ||
549 | imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); | ||
550 | imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2), | ||
551 | DMA_CCR(imxdmac->channel)); | ||
552 | |||
553 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); | ||
554 | |||
555 | dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " | ||
556 | "dma_length=%d\n", __func__, imxdmac->channel, | ||
557 | d->dest, d->src, d->len); | ||
558 | |||
559 | break; | ||
560 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ | ||
561 | case IMXDMA_DESC_CYCLIC: | ||
562 | case IMXDMA_DESC_SLAVE_SG: | ||
563 | if (d->direction == DMA_DEV_TO_MEM) { | ||
564 | imx_dmav1_writel(imxdma, imxdmac->per_address, | ||
565 | DMA_SAR(imxdmac->channel)); | ||
566 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, | ||
567 | DMA_CCR(imxdmac->channel)); | ||
568 | |||
569 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | ||
570 | "total length=%d dev_addr=0x%08x (dev2mem)\n", | ||
571 | __func__, imxdmac->channel, d->sg, d->sgcount, | ||
572 | d->len, imxdmac->per_address); | ||
573 | } else if (d->direction == DMA_MEM_TO_DEV) { | ||
574 | imx_dmav1_writel(imxdma, imxdmac->per_address, | ||
575 | DMA_DAR(imxdmac->channel)); | ||
576 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, | ||
577 | DMA_CCR(imxdmac->channel)); | ||
578 | |||
579 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | ||
580 | "total length=%d dev_addr=0x%08x (mem2dev)\n", | ||
581 | __func__, imxdmac->channel, d->sg, d->sgcount, | ||
582 | d->len, imxdmac->per_address); | ||
583 | } else { | ||
584 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", | ||
585 | __func__, imxdmac->channel); | ||
586 | return -EINVAL; | ||
587 | } | ||
588 | |||
589 | imxdma_sg_next(d); | ||
590 | |||
591 | break; | ||
592 | default: | ||
593 | return -EINVAL; | ||
594 | } | ||
595 | imxdma_enable_hw(d); | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static void imxdma_tasklet(unsigned long data) | ||
600 | { | ||
601 | struct imxdma_channel *imxdmac = (void *)data; | ||
602 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
603 | struct imxdma_desc *desc; | ||
604 | |||
605 | spin_lock(&imxdma->lock); | ||
606 | |||
607 | if (list_empty(&imxdmac->ld_active)) { | ||
608 | /* Someone might have called terminate all */ | ||
609 | goto out; | ||
610 | } | ||
611 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); | ||
612 | |||
613 | if (desc->desc.callback) | ||
614 | desc->desc.callback(desc->desc.callback_param); | ||
615 | |||
616 | /* If we are dealing with a cyclic descriptor, keep it on ld_active | ||
617 | * and don't mark the descriptor as complete. | ||
618 | * Only in the non-cyclic case is the descriptor marked as complete. | ||
619 | */ | ||
620 | if (imxdma_chan_is_doing_cyclic(imxdmac)) | ||
621 | goto out; | ||
622 | else | ||
623 | dma_cookie_complete(&desc->desc); | ||
624 | |||
625 | /* Free 2D slot if it was an interleaved transfer */ | ||
626 | if (imxdmac->enabled_2d) { | ||
627 | imxdma->slots_2d[imxdmac->slot_2d].count--; | ||
628 | imxdmac->enabled_2d = false; | ||
629 | } | ||
630 | |||
631 | list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); | ||
632 | |||
633 | if (!list_empty(&imxdmac->ld_queue)) { | ||
634 | desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, | ||
635 | node); | ||
636 | list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); | ||
637 | if (imxdma_xfer_desc(desc) < 0) | ||
638 | dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", | ||
639 | __func__, imxdmac->channel); | ||
640 | } | ||
641 | out: | ||
642 | spin_unlock(&imxdma->lock); | ||
643 | } | 92 | } |
644 | 93 | ||
645 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 94 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
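In the right-hand (post-patch) column, completion handling collapses to imxdma_handle(): call the descriptor's callback, then record its cookie as the last completed one. The snippet below models that bookkeeping in plain C outside the kernel; the struct and function names are made up for illustration, and the callback-before-update order simply mirrors the driver.

#include <stdio.h>

typedef int dma_cookie_t;

struct chan_model {
        dma_cookie_t last_completed;
        dma_cookie_t current_cookie;    /* cookie of the in-flight transfer */
        void (*callback)(void *param);  /* client completion callback */
        void *callback_param;
};

/* Mirror of imxdma_handle(): notify the client, then publish progress. */
static void handle_completion(struct chan_model *c)
{
        if (c->callback)
                c->callback(c->callback_param);
        c->last_completed = c->current_cookie;
}

static void transfer_done(void *param)
{
        printf("transfer %d done\n", *(int *)param);
}

int main(void)
{
        int id = 7;
        struct chan_model c = {
                .current_cookie = 7,
                .callback = transfer_done,
                .callback_param = &id,
        };

        handle_completion(&c);
        printf("last_completed = %d\n", c.last_completed);
        return 0;
}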
@@ -647,21 +96,16 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
647 | { | 96 | { |
648 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 97 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
649 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | 98 | struct dma_slave_config *dmaengine_cfg = (void *)arg; |
650 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 99 | int ret; |
651 | unsigned long flags; | ||
652 | unsigned int mode = 0; | 100 | unsigned int mode = 0; |
653 | 101 | ||
654 | switch (cmd) { | 102 | switch (cmd) { |
655 | case DMA_TERMINATE_ALL: | 103 | case DMA_TERMINATE_ALL: |
656 | imxdma_disable_hw(imxdmac); | 104 | imxdmac->status = DMA_ERROR; |
657 | 105 | imx_dma_disable(imxdmac->imxdma_channel); | |
658 | spin_lock_irqsave(&imxdma->lock, flags); | ||
659 | list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); | ||
660 | list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); | ||
661 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
662 | return 0; | 106 | return 0; |
663 | case DMA_SLAVE_CONFIG: | 107 | case DMA_SLAVE_CONFIG: |
664 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | 108 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { |
665 | imxdmac->per_address = dmaengine_cfg->src_addr; | 109 | imxdmac->per_address = dmaengine_cfg->src_addr; |
666 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; | 110 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; |
667 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | 111 | imxdmac->word_size = dmaengine_cfg->src_addr_width; |
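Both columns of this hunk consume the same client-supplied slave configuration for the device-to-memory direction (only the direction enum is renamed between kernel versions): the driver latches the peripheral FIFO address, the burst watermark and the bus width. The plain-C model below shows that selection; the struct is loosely modelled on struct dma_slave_config, and the memory-to-device branch plus the constants in main() are illustrative assumptions.

#include <stdio.h>

enum xfer_direction { DEV_TO_MEM, MEM_TO_DEV };

/* Client-supplied parameters, loosely modelled on struct dma_slave_config. */
struct slave_config {
        enum xfer_direction direction;
        unsigned int src_addr, dst_addr;              /* peripheral FIFO addresses */
        unsigned int src_addr_width, dst_addr_width;  /* bytes per bus access */
        unsigned int src_maxburst, dst_maxburst;      /* words per burst */
};

/* Per-channel state the driver keeps, as in imxdma_control(). */
struct chan_cfg {
        unsigned int per_address;
        unsigned int word_size;
        unsigned int watermark_level;
};

static void apply_slave_config(struct chan_cfg *ch, const struct slave_config *cfg)
{
        if (cfg->direction == DEV_TO_MEM) {           /* device -> memory */
                ch->per_address = cfg->src_addr;
                ch->watermark_level = cfg->src_maxburst;
                ch->word_size = cfg->src_addr_width;
        } else {                                      /* memory -> device */
                ch->per_address = cfg->dst_addr;
                ch->watermark_level = cfg->dst_maxburst;
                ch->word_size = cfg->dst_addr_width;
        }
}

int main(void)
{
        /* example values only */
        struct slave_config cfg = {
                .direction = DEV_TO_MEM,
                .src_addr = 0x10004040,
                .src_addr_width = 4,
                .src_maxburst = 16,
        };
        struct chan_cfg ch = { 0 };

        apply_slave_config(&ch, &cfg);
        printf("fifo=0x%08x width=%u burst=%u\n",
               ch.per_address, ch.word_size, ch.watermark_level);
        return 0;
}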
@@ -683,22 +127,16 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
683 | mode = IMX_DMA_MEMSIZE_32; | 127 | mode = IMX_DMA_MEMSIZE_32; |
684 | break; | 128 | break; |
685 | } | 129 | } |
130 | ret = imx_dma_config_channel(imxdmac->imxdma_channel, | ||
131 | mode | IMX_DMA_TYPE_FIFO, | ||
132 | IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, | ||
133 | imxdmac->dma_request, 1); | ||
134 | |||
135 | if (ret) | ||
136 | return ret; | ||
686 | 137 | ||
687 | imxdmac->hw_chaining = 1; | 138 | imx_dma_config_burstlen(imxdmac->imxdma_channel, |
688 | if (!imxdma_hw_chain(imxdmac)) | 139 | imxdmac->watermark_level * imxdmac->word_size); |
689 | return -EINVAL; | ||
690 | imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | | ||
691 | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | | ||
692 | CCR_REN; | ||
693 | imxdmac->ccr_to_device = | ||
694 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | | ||
695 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; | ||
696 | imx_dmav1_writel(imxdma, imxdmac->dma_request, | ||
697 | DMA_RSSR(imxdmac->channel)); | ||
698 | |||
699 | /* Set burst length */ | ||
700 | imx_dmav1_writel(imxdma, imxdmac->watermark_level * | ||
701 | imxdmac->word_size, DMA_BLR(imxdmac->channel)); | ||
702 | 140 | ||
703 | return 0; | 141 | return 0; |
704 | default: | 142 | default: |
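The left-hand (pre-patch) branch composes the per-direction CCR words from the memory-size and transfer-type encodings: the source-side fields sit at bits 4..5 (size) and 10..11 (mode), and shifting the same encoding left by two lands it in the destination-side fields at bits 6..7 and 12..13, with CCR_REN enabling the DMA request line. The sketch below recomputes those two words in plain C from the #defines visible earlier in this diff, purely to make the bit layout explicit.

#include <stdio.h>

/* Field encodings copied from the defines removed earlier in this diff. */
#define IMX_DMA_MEMSIZE_32      (0 << 4)
#define IMX_DMA_MEMSIZE_8       (1 << 4)
#define IMX_DMA_MEMSIZE_16      (2 << 4)
#define IMX_DMA_TYPE_LINEAR     (0 << 10)
#define IMX_DMA_TYPE_FIFO       (2 << 10)
#define CCR_REN                 (1 << 3)

/*
 * Source fields occupy bits 4..5 (size) and 10..11 (mode); "<< 2"
 * relocates the same encoding to the destination fields at 6..7 and 12..13.
 */
static unsigned int ccr_from_device(unsigned int fifo_memsize)
{
        return (fifo_memsize | IMX_DMA_TYPE_FIFO) |
               ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | CCR_REN;
}

static unsigned int ccr_to_device(unsigned int fifo_memsize)
{
        return (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
               ((fifo_memsize | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
}

int main(void)
{
        printf("from_device = 0x%08x\n", ccr_from_device(IMX_DMA_MEMSIZE_16));
        printf("to_device   = 0x%08x\n", ccr_to_device(IMX_DMA_MEMSIZE_16));
        return 0;
}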
@@ -712,20 +150,43 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, | |||
712 | dma_cookie_t cookie, | 150 | dma_cookie_t cookie, |
713 | struct dma_tx_state *txstate) | 151 | struct dma_tx_state *txstate) |
714 | { | 152 | { |
715 | return dma_cookie_status(chan, cookie, txstate); | 153 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
154 | dma_cookie_t last_used; | ||
155 | enum dma_status ret; | ||
156 | |||
157 | last_used = chan->cookie; | ||
158 | |||
159 | ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); | ||
160 | dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); | ||
161 | |||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) | ||
166 | { | ||
167 | dma_cookie_t cookie = imxdma->chan.cookie; | ||
168 | |||
169 | if (++cookie < 0) | ||
170 | cookie = 1; | ||
171 | |||
172 | imxdma->chan.cookie = cookie; | ||
173 | imxdma->desc.cookie = cookie; | ||
174 | |||
175 | return cookie; | ||
716 | } | 176 | } |
717 | 177 | ||
718 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) | 178 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) |
719 | { | 179 | { |
720 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); | 180 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); |
721 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
722 | dma_cookie_t cookie; | 181 | dma_cookie_t cookie; |
723 | unsigned long flags; | ||
724 | 182 | ||
725 | spin_lock_irqsave(&imxdma->lock, flags); | 183 | spin_lock_irq(&imxdmac->lock); |
726 | list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue); | 184 | |
727 | cookie = dma_cookie_assign(tx); | 185 | cookie = imxdma_assign_cookie(imxdmac); |
728 | spin_unlock_irqrestore(&imxdma->lock, flags); | 186 | |
187 | imx_dma_enable(imxdmac->imxdma_channel); | ||
188 | |||
189 | spin_unlock_irq(&imxdmac->lock); | ||
729 | 190 | ||
730 | return cookie; | 191 | return cookie; |
731 | } | 192 | } |
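The right-hand column open-codes cookie assignment instead of calling dma_cookie_assign(): bump the channel's last issued cookie and wrap back to 1 on signed overflow, keeping zero and negative values free for error reporting. A plain-C rendering of that rule follows (the kernel builds with -fno-strict-overflow, which makes the signed wrap well defined there; the demo stays clear of actual overflow).

#include <stdio.h>

typedef int dma_cookie_t;

struct chan_model {
        dma_cookie_t cookie;    /* last cookie handed out */
};

/*
 * Same rule as imxdma_assign_cookie(): monotonically increasing,
 * wrapping back to 1 instead of handing out zero or negative values.
 */
static dma_cookie_t assign_cookie(struct chan_model *c)
{
        dma_cookie_t cookie = c->cookie;

        if (++cookie < 0)
                cookie = 1;

        c->cookie = cookie;
        return cookie;
}

int main(void)
{
        struct chan_model c = { .cookie = 0 };

        printf("%d\n", assign_cookie(&c));      /* 1 */
        printf("%d\n", assign_cookie(&c));      /* 2 */
        return 0;
}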
@@ -735,52 +196,23 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) | |||
735 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 196 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
736 | struct imx_dma_data *data = chan->private; | 197 | struct imx_dma_data *data = chan->private; |
737 | 198 | ||
738 | if (data != NULL) | 199 | imxdmac->dma_request = data->dma_request; |
739 | imxdmac->dma_request = data->dma_request; | ||
740 | 200 | ||
741 | while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) { | 201 | dma_async_tx_descriptor_init(&imxdmac->desc, chan); |
742 | struct imxdma_desc *desc; | 202 | imxdmac->desc.tx_submit = imxdma_tx_submit; |
203 | /* txd.flags will be overwritten in prep funcs */ | ||
204 | imxdmac->desc.flags = DMA_CTRL_ACK; | ||
743 | 205 | ||
744 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | 206 | imxdmac->status = DMA_SUCCESS; |
745 | if (!desc) | ||
746 | break; | ||
747 | __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor)); | ||
748 | dma_async_tx_descriptor_init(&desc->desc, chan); | ||
749 | desc->desc.tx_submit = imxdma_tx_submit; | ||
750 | /* txd.flags will be overwritten in prep funcs */ | ||
751 | desc->desc.flags = DMA_CTRL_ACK; | ||
752 | desc->status = DMA_SUCCESS; | ||
753 | |||
754 | list_add_tail(&desc->node, &imxdmac->ld_free); | ||
755 | imxdmac->descs_allocated++; | ||
756 | } | ||
757 | |||
758 | if (!imxdmac->descs_allocated) | ||
759 | return -ENOMEM; | ||
760 | 207 | ||
761 | return imxdmac->descs_allocated; | 208 | return 0; |
762 | } | 209 | } |
763 | 210 | ||
764 | static void imxdma_free_chan_resources(struct dma_chan *chan) | 211 | static void imxdma_free_chan_resources(struct dma_chan *chan) |
765 | { | 212 | { |
766 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 213 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
767 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
768 | struct imxdma_desc *desc, *_desc; | ||
769 | unsigned long flags; | ||
770 | |||
771 | spin_lock_irqsave(&imxdma->lock, flags); | ||
772 | |||
773 | imxdma_disable_hw(imxdmac); | ||
774 | list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); | ||
775 | list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); | ||
776 | 214 | ||
777 | spin_unlock_irqrestore(&imxdma->lock, flags); | 215 | imx_dma_disable(imxdmac->imxdma_channel); |
778 | |||
779 | list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { | ||
780 | kfree(desc); | ||
781 | imxdmac->descs_allocated--; | ||
782 | } | ||
783 | INIT_LIST_HEAD(&imxdmac->ld_free); | ||
784 | 216 | ||
785 | if (imxdmac->sg_list) { | 217 | if (imxdmac->sg_list) { |
786 | kfree(imxdmac->sg_list); | 218 | kfree(imxdmac->sg_list); |
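The left-hand alloc_chan_resources() pre-populates a per-channel free list with up to 16 descriptors and returns how many it managed to allocate, failing only if it got none; free_chan_resources() drains everything back and releases it. Below is a stripped-down userspace model of that free-list life cycle, using a plain singly linked list in place of the kernel list_head machinery.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CHAN_DESCRIPTORS 16

struct desc {
        struct desc *next;
        /* transfer parameters would live here */
};

struct chan {
        struct desc *ld_free;
        int descs_allocated;
};

/* Allocate up to MAX_CHAN_DESCRIPTORS; report the count, -ENOMEM if none. */
static int alloc_chan_resources(struct chan *c)
{
        while (c->descs_allocated < MAX_CHAN_DESCRIPTORS) {
                struct desc *d = calloc(1, sizeof(*d));

                if (!d)
                        break;
                d->next = c->ld_free;
                c->ld_free = d;
                c->descs_allocated++;
        }
        return c->descs_allocated ? c->descs_allocated : -ENOMEM;
}

static void free_chan_resources(struct chan *c)
{
        while (c->ld_free) {
                struct desc *d = c->ld_free;

                c->ld_free = d->next;
                free(d);
                c->descs_allocated--;
        }
}

int main(void)
{
        struct chan c = { 0 };

        printf("allocated %d descriptors\n", alloc_chan_resources(&c));
        free_chan_resources(&c);
        return 0;
}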
@@ -790,31 +222,35 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) | |||
790 | 222 | ||
791 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | 223 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( |
792 | struct dma_chan *chan, struct scatterlist *sgl, | 224 | struct dma_chan *chan, struct scatterlist *sgl, |
793 | unsigned int sg_len, enum dma_transfer_direction direction, | 225 | unsigned int sg_len, enum dma_data_direction direction, |
794 | unsigned long flags, void *context) | 226 | unsigned long flags) |
795 | { | 227 | { |
796 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 228 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
797 | struct scatterlist *sg; | 229 | struct scatterlist *sg; |
798 | int i, dma_length = 0; | 230 | int i, ret, dma_length = 0; |
799 | struct imxdma_desc *desc; | 231 | unsigned int dmamode; |
800 | 232 | ||
801 | if (list_empty(&imxdmac->ld_free) || | 233 | if (imxdmac->status == DMA_IN_PROGRESS) |
802 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
803 | return NULL; | 234 | return NULL; |
804 | 235 | ||
805 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); | 236 | imxdmac->status = DMA_IN_PROGRESS; |
806 | 237 | ||
807 | for_each_sg(sgl, sg, sg_len, i) { | 238 | for_each_sg(sgl, sg, sg_len, i) { |
808 | dma_length += sg_dma_len(sg); | 239 | dma_length += sg->length; |
809 | } | 240 | } |
810 | 241 | ||
242 | if (direction == DMA_FROM_DEVICE) | ||
243 | dmamode = DMA_MODE_READ; | ||
244 | else | ||
245 | dmamode = DMA_MODE_WRITE; | ||
246 | |||
811 | switch (imxdmac->word_size) { | 247 | switch (imxdmac->word_size) { |
812 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | 248 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
813 | if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3) | 249 | if (sgl->length & 3 || sgl->dma_address & 3) |
814 | return NULL; | 250 | return NULL; |
815 | break; | 251 | break; |
816 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | 252 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
817 | if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1) | 253 | if (sgl->length & 1 || sgl->dma_address & 1) |
818 | return NULL; | 254 | return NULL; |
819 | break; | 255 | break; |
820 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 256 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
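Both columns of this hunk reject a slave_sg request whose first segment is not naturally aligned for the configured bus width: a 4-byte width needs the low two bits of length and address clear, a 2-byte width the low bit, and a 1-byte width accepts anything. A compact plain-C version of that check, with the scatterlist bookkeeping left out:

#include <stdbool.h>
#include <stdio.h>

/*
 * Natural-alignment check mirroring the word_size switch in
 * imxdma_prep_slave_sg(); width is the bus access size in bytes.
 */
static bool segment_ok(unsigned long dma_address, unsigned long length,
                       unsigned int width)
{
        switch (width) {
        case 4:
                return !(length & 3) && !(dma_address & 3);
        case 2:
                return !(length & 1) && !(dma_address & 1);
        case 1:
                return true;
        default:
                return false;   /* unsupported bus width */
        }
}

int main(void)
{
        printf("%d\n", segment_ok(0x1000, 64, 4));      /* 1: aligned */
        printf("%d\n", segment_ok(0x1002, 64, 4));      /* 0: address misaligned */
        return 0;
}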
@@ -823,41 +259,37 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
823 | return NULL; | 259 | return NULL; |
824 | } | 260 | } |
825 | 261 | ||
826 | desc->type = IMXDMA_DESC_SLAVE_SG; | 262 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, |
827 | desc->sg = sgl; | 263 | dma_length, imxdmac->per_address, dmamode); |
828 | desc->sgcount = sg_len; | 264 | if (ret) |
829 | desc->len = dma_length; | 265 | return NULL; |
830 | desc->direction = direction; | ||
831 | if (direction == DMA_DEV_TO_MEM) { | ||
832 | desc->src = imxdmac->per_address; | ||
833 | } else { | ||
834 | desc->dest = imxdmac->per_address; | ||
835 | } | ||
836 | desc->desc.callback = NULL; | ||
837 | desc->desc.callback_param = NULL; | ||
838 | 266 | ||
839 | return &desc->desc; | 267 | return &imxdmac->desc; |
840 | } | 268 | } |
841 | 269 | ||
842 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | 270 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( |
843 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 271 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
844 | size_t period_len, enum dma_transfer_direction direction, | 272 | size_t period_len, enum dma_data_direction direction) |
845 | unsigned long flags, void *context) | ||
846 | { | 273 | { |
847 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 274 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
848 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 275 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
849 | struct imxdma_desc *desc; | 276 | int i, ret; |
850 | int i; | ||
851 | unsigned int periods = buf_len / period_len; | 277 | unsigned int periods = buf_len / period_len; |
278 | unsigned int dmamode; | ||
852 | 279 | ||
853 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", | 280 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", |
854 | __func__, imxdmac->channel, buf_len, period_len); | 281 | __func__, imxdmac->channel, buf_len, period_len); |
855 | 282 | ||
856 | if (list_empty(&imxdmac->ld_free) || | 283 | if (imxdmac->status == DMA_IN_PROGRESS) |
857 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
858 | return NULL; | 284 | return NULL; |
285 | imxdmac->status = DMA_IN_PROGRESS; | ||
859 | 286 | ||
860 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); | 287 | ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, |
288 | imxdma_progression); | ||
289 | if (ret) { | ||
290 | dev_err(imxdma->dev, "Failed to setup the DMA handler\n"); | ||
291 | return NULL; | ||
292 | } | ||
861 | 293 | ||
862 | if (imxdmac->sg_list) | 294 | if (imxdmac->sg_list) |
863 | kfree(imxdmac->sg_list); | 295 | kfree(imxdmac->sg_list); |
@@ -873,243 +305,72 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
873 | imxdmac->sg_list[i].page_link = 0; | 305 | imxdmac->sg_list[i].page_link = 0; |
874 | imxdmac->sg_list[i].offset = 0; | 306 | imxdmac->sg_list[i].offset = 0; |
875 | imxdmac->sg_list[i].dma_address = dma_addr; | 307 | imxdmac->sg_list[i].dma_address = dma_addr; |
876 | sg_dma_len(&imxdmac->sg_list[i]) = period_len; | 308 | imxdmac->sg_list[i].length = period_len; |
877 | dma_addr += period_len; | 309 | dma_addr += period_len; |
878 | } | 310 | } |
879 | 311 | ||
880 | /* close the loop */ | 312 | /* close the loop */ |
881 | imxdmac->sg_list[periods].offset = 0; | 313 | imxdmac->sg_list[periods].offset = 0; |
882 | sg_dma_len(&imxdmac->sg_list[periods]) = 0; | 314 | imxdmac->sg_list[periods].length = 0; |
883 | imxdmac->sg_list[periods].page_link = | 315 | imxdmac->sg_list[periods].page_link = |
884 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; | 316 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; |
885 | 317 | ||
886 | desc->type = IMXDMA_DESC_CYCLIC; | 318 | if (direction == DMA_FROM_DEVICE) |
887 | desc->sg = imxdmac->sg_list; | 319 | dmamode = DMA_MODE_READ; |
888 | desc->sgcount = periods; | 320 | else |
889 | desc->len = IMX_DMA_LENGTH_LOOP; | 321 | dmamode = DMA_MODE_WRITE; |
890 | desc->direction = direction; | ||
891 | if (direction == DMA_DEV_TO_MEM) { | ||
892 | desc->src = imxdmac->per_address; | ||
893 | } else { | ||
894 | desc->dest = imxdmac->per_address; | ||
895 | } | ||
896 | desc->desc.callback = NULL; | ||
897 | desc->desc.callback_param = NULL; | ||
898 | |||
899 | return &desc->desc; | ||
900 | } | ||
901 | |||
902 | static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( | ||
903 | struct dma_chan *chan, dma_addr_t dest, | ||
904 | dma_addr_t src, size_t len, unsigned long flags) | ||
905 | { | ||
906 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
907 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
908 | struct imxdma_desc *desc; | ||
909 | |||
910 | dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", | ||
911 | __func__, imxdmac->channel, src, dest, len); | ||
912 | |||
913 | if (list_empty(&imxdmac->ld_free) || | ||
914 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
915 | return NULL; | ||
916 | |||
917 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); | ||
918 | |||
919 | desc->type = IMXDMA_DESC_MEMCPY; | ||
920 | desc->src = src; | ||
921 | desc->dest = dest; | ||
922 | desc->len = len; | ||
923 | desc->direction = DMA_MEM_TO_MEM; | ||
924 | desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; | ||
925 | desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; | ||
926 | desc->desc.callback = NULL; | ||
927 | desc->desc.callback_param = NULL; | ||
928 | |||
929 | return &desc->desc; | ||
930 | } | ||
931 | |||
932 | static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( | ||
933 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
934 | unsigned long flags) | ||
935 | { | ||
936 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
937 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
938 | struct imxdma_desc *desc; | ||
939 | |||
940 | dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" | ||
941 | " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, | ||
942 | imxdmac->channel, xt->src_start, xt->dst_start, | ||
943 | xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", | ||
944 | xt->numf, xt->frame_size); | ||
945 | |||
946 | if (list_empty(&imxdmac->ld_free) || | ||
947 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
948 | return NULL; | ||
949 | 322 | ||
950 | if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM) | 323 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, |
324 | IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); | ||
325 | if (ret) | ||
951 | return NULL; | 326 | return NULL; |
952 | 327 | ||
953 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); | 328 | return &imxdmac->desc; |
954 | |||
955 | desc->type = IMXDMA_DESC_INTERLEAVED; | ||
956 | desc->src = xt->src_start; | ||
957 | desc->dest = xt->dst_start; | ||
958 | desc->x = xt->sgl[0].size; | ||
959 | desc->y = xt->numf; | ||
960 | desc->w = xt->sgl[0].icg + desc->x; | ||
961 | desc->len = desc->x * desc->y; | ||
962 | desc->direction = DMA_MEM_TO_MEM; | ||
963 | desc->config_port = IMX_DMA_MEMSIZE_32; | ||
964 | desc->config_mem = IMX_DMA_MEMSIZE_32; | ||
965 | if (xt->src_sgl) | ||
966 | desc->config_mem |= IMX_DMA_TYPE_2D; | ||
967 | if (xt->dst_sgl) | ||
968 | desc->config_port |= IMX_DMA_TYPE_2D; | ||
969 | desc->desc.callback = NULL; | ||
970 | desc->desc.callback_param = NULL; | ||
971 | |||
972 | return &desc->desc; | ||
973 | } | 329 | } |
974 | 330 | ||
975 | static void imxdma_issue_pending(struct dma_chan *chan) | 331 | static void imxdma_issue_pending(struct dma_chan *chan) |
976 | { | 332 | { |
977 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 333 | /* |
978 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 334 | * Nothing to do. We only have a single descriptor |
979 | struct imxdma_desc *desc; | 335 | */ |
980 | unsigned long flags; | ||
981 | |||
982 | spin_lock_irqsave(&imxdma->lock, flags); | ||
983 | if (list_empty(&imxdmac->ld_active) && | ||
984 | !list_empty(&imxdmac->ld_queue)) { | ||
985 | desc = list_first_entry(&imxdmac->ld_queue, | ||
986 | struct imxdma_desc, node); | ||
987 | |||
988 | if (imxdma_xfer_desc(desc) < 0) { | ||
989 | dev_warn(imxdma->dev, | ||
990 | "%s: channel: %d couldn't issue DMA xfer\n", | ||
991 | __func__, imxdmac->channel); | ||
992 | } else { | ||
993 | list_move_tail(imxdmac->ld_queue.next, | ||
994 | &imxdmac->ld_active); | ||
995 | } | ||
996 | } | ||
997 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
998 | } | 336 | } |
999 | 337 | ||
1000 | static int __init imxdma_probe(struct platform_device *pdev) | 338 | static int __init imxdma_probe(struct platform_device *pdev) |
1001 | { | 339 | { |
1002 | struct imxdma_engine *imxdma; | 340 | struct imxdma_engine *imxdma; |
1003 | struct resource *res; | ||
1004 | int ret, i; | 341 | int ret, i; |
1005 | int irq, irq_err; | ||
1006 | 342 | ||
1007 | imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); | 343 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); |
1008 | if (!imxdma) | 344 | if (!imxdma) |
1009 | return -ENOMEM; | 345 | return -ENOMEM; |
1010 | 346 | ||
1011 | imxdma->devtype = pdev->id_entry->driver_data; | ||
1012 | |||
1013 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1014 | imxdma->base = devm_request_and_ioremap(&pdev->dev, res); | ||
1015 | if (!imxdma->base) | ||
1016 | return -EADDRNOTAVAIL; | ||
1017 | |||
1018 | irq = platform_get_irq(pdev, 0); | ||
1019 | if (irq < 0) | ||
1020 | return irq; | ||
1021 | |||
1022 | imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); | ||
1023 | if (IS_ERR(imxdma->dma_ipg)) | ||
1024 | return PTR_ERR(imxdma->dma_ipg); | ||
1025 | |||
1026 | imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); | ||
1027 | if (IS_ERR(imxdma->dma_ahb)) | ||
1028 | return PTR_ERR(imxdma->dma_ahb); | ||
1029 | |||
1030 | clk_prepare_enable(imxdma->dma_ipg); | ||
1031 | clk_prepare_enable(imxdma->dma_ahb); | ||
1032 | |||
1033 | /* reset DMA module */ | ||
1034 | imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); | ||
1035 | |||
1036 | if (is_imx1_dma(imxdma)) { | ||
1037 | ret = devm_request_irq(&pdev->dev, irq, | ||
1038 | dma_irq_handler, 0, "DMA", imxdma); | ||
1039 | if (ret) { | ||
1040 | dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); | ||
1041 | goto err; | ||
1042 | } | ||
1043 | |||
1044 | irq_err = platform_get_irq(pdev, 1); | ||
1045 | if (irq_err < 0) { | ||
1046 | ret = irq_err; | ||
1047 | goto err; | ||
1048 | } | ||
1049 | |||
1050 | ret = devm_request_irq(&pdev->dev, irq_err, | ||
1051 | imxdma_err_handler, 0, "DMA", imxdma); | ||
1052 | if (ret) { | ||
1053 | dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); | ||
1054 | goto err; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | /* enable DMA module */ | ||
1059 | imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR); | ||
1060 | |||
1061 | /* clear all interrupts */ | ||
1062 | imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); | ||
1063 | |||
1064 | /* disable interrupts */ | ||
1065 | imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); | ||
1066 | |||
1067 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | 347 | INIT_LIST_HEAD(&imxdma->dma_device.channels); |
1068 | 348 | ||
1069 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | 349 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); |
1070 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | 350 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); |
1071 | dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); | ||
1072 | dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask); | ||
1073 | |||
1074 | /* Initialize 2D global parameters */ | ||
1075 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) | ||
1076 | imxdma->slots_2d[i].count = 0; | ||
1077 | |||
1078 | spin_lock_init(&imxdma->lock); | ||
1079 | 351 | ||
1080 | /* Initialize channel parameters */ | 352 | /* Initialize channel parameters */ |
1081 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | 353 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
1082 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 354 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
1083 | 355 | ||
1084 | if (!is_imx1_dma(imxdma)) { | 356 | imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", |
1085 | ret = devm_request_irq(&pdev->dev, irq + i, | 357 | DMA_PRIO_MEDIUM); |
1086 | dma_irq_handler, 0, "DMA", imxdma); | 358 | if ((int)imxdmac->channel < 0) { |
1087 | if (ret) { | 359 | ret = -ENODEV; |
1088 | dev_warn(imxdma->dev, "Can't register IRQ %d " | 360 | goto err_init; |
1089 | "for DMA channel %d\n", | ||
1090 | irq + i, i); | ||
1091 | goto err; | ||
1092 | } | ||
1093 | init_timer(&imxdmac->watchdog); | ||
1094 | imxdmac->watchdog.function = &imxdma_watchdog; | ||
1095 | imxdmac->watchdog.data = (unsigned long)imxdmac; | ||
1096 | } | 361 | } |
1097 | 362 | ||
1098 | imxdmac->imxdma = imxdma; | 363 | imx_dma_setup_handlers(imxdmac->imxdma_channel, |
364 | imxdma_irq_handler, imxdma_err_handler, imxdmac); | ||
1099 | 365 | ||
1100 | INIT_LIST_HEAD(&imxdmac->ld_queue); | 366 | imxdmac->imxdma = imxdma; |
1101 | INIT_LIST_HEAD(&imxdmac->ld_free); | 367 | spin_lock_init(&imxdmac->lock); |
1102 | INIT_LIST_HEAD(&imxdmac->ld_active); | ||
1103 | 368 | ||
1104 | tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet, | ||
1105 | (unsigned long)imxdmac); | ||
1106 | imxdmac->chan.device = &imxdma->dma_device; | 369 | imxdmac->chan.device = &imxdma->dma_device; |
1107 | dma_cookie_init(&imxdmac->chan); | ||
1108 | imxdmac->channel = i; | 370 | imxdmac->channel = i; |
1109 | 371 | ||
1110 | /* Add the channel to the DMAC list */ | 372 | /* Add the channel to the DMAC list */ |
1111 | list_add_tail(&imxdmac->chan.device_node, | 373 | list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); |
1112 | &imxdma->dma_device.channels); | ||
1113 | } | 374 | } |
1114 | 375 | ||
1115 | imxdma->dev = &pdev->dev; | 376 | imxdma->dev = &pdev->dev; |
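Earlier in this hunk, prep_dma_cyclic() carves the client buffer into buf_len / period_len equal periods, points one scatterlist entry at each period, and then closes the loop with a terminator entry whose page_link points back at the start of the list (bit 0 set marks a chain pointer, bit 1 kept clear so it is not an end marker). The code below reproduces that ring arithmetic with ordinary structs; it is a model of the layout only, not of the kernel scatterlist API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sg_entry {
        uint32_t dma_address;
        uint32_t length;
        uintptr_t page_link;    /* low bits reused as chain/end flags */
};

/* Build a cyclic ring: one entry per period plus a chain terminator. */
static struct sg_entry *build_cyclic_ring(uint32_t dma_addr, uint32_t buf_len,
                                          uint32_t period_len, unsigned int *nents)
{
        unsigned int periods = buf_len / period_len;
        struct sg_entry *sg = calloc(periods + 1, sizeof(*sg));
        unsigned int i;

        if (!sg)
                return NULL;

        for (i = 0; i < periods; i++) {
                sg[i].dma_address = dma_addr;
                sg[i].length = period_len;
                dma_addr += period_len;
        }

        /* close the loop: chain back to the first entry
         * (bit 0 set = chain pointer, bit 1 clear = not an end marker) */
        sg[periods].length = 0;
        sg[periods].page_link = ((uintptr_t)sg | 0x01) & ~(uintptr_t)0x02;

        *nents = periods;
        return sg;
}

int main(void)
{
        unsigned int n = 0;
        struct sg_entry *ring = build_cyclic_ring(0x80000000u, 4096, 1024, &n);

        if (!ring)
                return 1;
        printf("%u periods, last period at 0x%08x\n", n,
               (unsigned int)ring[n - 1].dma_address);
        free(ring);
        return 0;
}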
@@ -1120,39 +381,46 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
1120 | imxdma->dma_device.device_tx_status = imxdma_tx_status; | 381 | imxdma->dma_device.device_tx_status = imxdma_tx_status; |
1121 | imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; | 382 | imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; |
1122 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; | 383 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; |
1123 | imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; | ||
1124 | imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; | ||
1125 | imxdma->dma_device.device_control = imxdma_control; | 384 | imxdma->dma_device.device_control = imxdma_control; |
1126 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; | 385 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; |
1127 | 386 | ||
1128 | platform_set_drvdata(pdev, imxdma); | 387 | platform_set_drvdata(pdev, imxdma); |
1129 | 388 | ||
1130 | imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */ | ||
1131 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; | 389 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; |
1132 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); | 390 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); |
1133 | 391 | ||
1134 | ret = dma_async_device_register(&imxdma->dma_device); | 392 | ret = dma_async_device_register(&imxdma->dma_device); |
1135 | if (ret) { | 393 | if (ret) { |
1136 | dev_err(&pdev->dev, "unable to register\n"); | 394 | dev_err(&pdev->dev, "unable to register\n"); |
1137 | goto err; | 395 | goto err_init; |
1138 | } | 396 | } |
1139 | 397 | ||
1140 | return 0; | 398 | return 0; |
1141 | 399 | ||
1142 | err: | 400 | err_init: |
1143 | clk_disable_unprepare(imxdma->dma_ipg); | 401 | while (--i >= 0) { |
1144 | clk_disable_unprepare(imxdma->dma_ahb); | 402 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
403 | imx_dma_free(imxdmac->imxdma_channel); | ||
404 | } | ||
405 | |||
406 | kfree(imxdma); | ||
1145 | return ret; | 407 | return ret; |
1146 | } | 408 | } |
1147 | 409 | ||
1148 | static int __exit imxdma_remove(struct platform_device *pdev) | 410 | static int __exit imxdma_remove(struct platform_device *pdev) |
1149 | { | 411 | { |
1150 | struct imxdma_engine *imxdma = platform_get_drvdata(pdev); | 412 | struct imxdma_engine *imxdma = platform_get_drvdata(pdev); |
413 | int i; | ||
1151 | 414 | ||
1152 | dma_async_device_unregister(&imxdma->dma_device); | 415 | dma_async_device_unregister(&imxdma->dma_device); |
1153 | 416 | ||
1154 | clk_disable_unprepare(imxdma->dma_ipg); | 417 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
1155 | clk_disable_unprepare(imxdma->dma_ahb); | 418 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
419 | |||
420 | imx_dma_free(imxdmac->imxdma_channel); | ||
421 | } | ||
422 | |||
423 | kfree(imxdma); | ||
1156 | 424 | ||
1157 | return 0; | 425 | return 0; |
1158 | } | 426 | } |
@@ -1161,7 +429,6 @@ static struct platform_driver imxdma_driver = { | |||
1161 | .driver = { | 429 | .driver = { |
1162 | .name = "imx-dma", | 430 | .name = "imx-dma", |
1163 | }, | 431 | }, |
1164 | .id_table = imx_dma_devtype, | ||
1165 | .remove = __exit_p(imxdma_remove), | 432 | .remove = __exit_p(imxdma_remove), |
1166 | }; | 433 | }; |
1167 | 434 | ||
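The patch also drops the imx_dma_devtype platform_device_id table and the .id_table hook, so probe() can no longer derive IMX1/IMX21/IMX27 behaviour from pdev->id_entry->driver_data. The lookup itself is just a sentinel-terminated table scan; a userspace model of it:

#include <stdio.h>
#include <string.h>

enum dma_variant { IMX1_DMA, IMX21_DMA, IMX27_DMA };

struct device_id {
        const char *name;
        enum dma_variant driver_data;
};

/* Model of the removed imx_dma_devtype[] table, sentinel-terminated. */
static const struct device_id devtype[] = {
        { "imx1-dma",  IMX1_DMA  },
        { "imx21-dma", IMX21_DMA },
        { "imx27-dma", IMX27_DMA },
        { NULL, IMX1_DMA }      /* sentinel, matched on name only */
};

static const struct device_id *match_id(const char *name)
{
        const struct device_id *id;

        for (id = devtype; id->name; id++)
                if (!strcmp(id->name, name))
                        return id;
        return NULL;
}

int main(void)
{
        const struct device_id *id = match_id("imx27-dma");

        if (id)
                printf("%s -> variant %d\n", id->name, id->driver_data);
        return 0;
}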
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f082aa3a918..7bd7e98548c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -18,13 +18,11 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/types.h> | 21 | #include <linux/types.h> |
23 | #include <linux/bitops.h> | ||
24 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
25 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
26 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
27 | #include <linux/delay.h> | 25 | #include <linux/wait.h> |
28 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
29 | #include <linux/semaphore.h> | 27 | #include <linux/semaphore.h> |
30 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
@@ -38,10 +36,9 @@ | |||
38 | #include <linux/of_device.h> | 36 | #include <linux/of_device.h> |
39 | 37 | ||
40 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
41 | #include <linux/platform_data/dma-imx-sdma.h> | 39 | #include <mach/sdma.h> |
42 | #include <linux/platform_data/dma-imx.h> | 40 | #include <mach/dma.h> |
43 | 41 | #include <mach/hardware.h> | |
44 | #include "dmaengine.h" | ||
45 | 42 | ||
46 | /* SDMA registers */ | 43 | /* SDMA registers */ |
47 | #define SDMA_H_C0PTR 0x000 | 44 | #define SDMA_H_C0PTR 0x000 |
@@ -248,7 +245,7 @@ struct sdma_engine; | |||
248 | struct sdma_channel { | 245 | struct sdma_channel { |
249 | struct sdma_engine *sdma; | 246 | struct sdma_engine *sdma; |
250 | unsigned int channel; | 247 | unsigned int channel; |
251 | enum dma_transfer_direction direction; | 248 | enum dma_data_direction direction; |
252 | enum sdma_peripheral_type peripheral_type; | 249 | enum sdma_peripheral_type peripheral_type; |
253 | unsigned int event_id0; | 250 | unsigned int event_id0; |
254 | unsigned int event_id1; | 251 | unsigned int event_id1; |
@@ -261,19 +258,17 @@ struct sdma_channel { | |||
261 | unsigned int pc_from_device, pc_to_device; | 258 | unsigned int pc_from_device, pc_to_device; |
262 | unsigned long flags; | 259 | unsigned long flags; |
263 | dma_addr_t per_address; | 260 | dma_addr_t per_address; |
264 | unsigned long event_mask[2]; | 261 | u32 event_mask0, event_mask1; |
265 | unsigned long watermark_level; | 262 | u32 watermark_level; |
266 | u32 shp_addr, per_addr; | 263 | u32 shp_addr, per_addr; |
267 | struct dma_chan chan; | 264 | struct dma_chan chan; |
268 | spinlock_t lock; | 265 | spinlock_t lock; |
269 | struct dma_async_tx_descriptor desc; | 266 | struct dma_async_tx_descriptor desc; |
267 | dma_cookie_t last_completed; | ||
270 | enum dma_status status; | 268 | enum dma_status status; |
271 | unsigned int chn_count; | ||
272 | unsigned int chn_real_count; | ||
273 | struct tasklet_struct tasklet; | ||
274 | }; | 269 | }; |
275 | 270 | ||
276 | #define IMX_DMA_SG_LOOP BIT(0) | 271 | #define IMX_DMA_SG_LOOP (1 << 0) |
277 | 272 | ||
278 | #define MAX_DMA_CHANNELS 32 | 273 | #define MAX_DMA_CHANNELS 32 |
279 | #define MXC_SDMA_DEFAULT_PRIORITY 1 | 274 | #define MXC_SDMA_DEFAULT_PRIORITY 1 |
@@ -322,9 +317,7 @@ struct sdma_engine { | |||
322 | struct sdma_context_data *context; | 317 | struct sdma_context_data *context; |
323 | dma_addr_t context_phys; | 318 | dma_addr_t context_phys; |
324 | struct dma_device dma_device; | 319 | struct dma_device dma_device; |
325 | struct clk *clk_ipg; | 320 | struct clk *clk; |
326 | struct clk *clk_ahb; | ||
327 | spinlock_t channel_0_lock; | ||
328 | struct sdma_script_start_addrs *script_addrs; | 321 | struct sdma_script_start_addrs *script_addrs; |
329 | }; | 322 | }; |
330 | 323 | ||
@@ -348,9 +341,9 @@ static const struct of_device_id sdma_dt_ids[] = { | |||
348 | }; | 341 | }; |
349 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); | 342 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); |
350 | 343 | ||
351 | #define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */ | 344 | #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ |
352 | #define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */ | 345 | #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ |
353 | #define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */ | 346 | #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ |
354 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ | 347 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ |
355 | 348 | ||
356 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) | 349 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) |
@@ -365,64 +358,51 @@ static int sdma_config_ownership(struct sdma_channel *sdmac, | |||
365 | { | 358 | { |
366 | struct sdma_engine *sdma = sdmac->sdma; | 359 | struct sdma_engine *sdma = sdmac->sdma; |
367 | int channel = sdmac->channel; | 360 | int channel = sdmac->channel; |
368 | unsigned long evt, mcu, dsp; | 361 | u32 evt, mcu, dsp; |
369 | 362 | ||
370 | if (event_override && mcu_override && dsp_override) | 363 | if (event_override && mcu_override && dsp_override) |
371 | return -EINVAL; | 364 | return -EINVAL; |
372 | 365 | ||
373 | evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR); | 366 | evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); |
374 | mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR); | 367 | mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); |
375 | dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR); | 368 | dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); |
376 | 369 | ||
377 | if (dsp_override) | 370 | if (dsp_override) |
378 | __clear_bit(channel, &dsp); | 371 | dsp &= ~(1 << channel); |
379 | else | 372 | else |
380 | __set_bit(channel, &dsp); | 373 | dsp |= (1 << channel); |
381 | 374 | ||
382 | if (event_override) | 375 | if (event_override) |
383 | __clear_bit(channel, &evt); | 376 | evt &= ~(1 << channel); |
384 | else | 377 | else |
385 | __set_bit(channel, &evt); | 378 | evt |= (1 << channel); |
386 | 379 | ||
387 | if (mcu_override) | 380 | if (mcu_override) |
388 | __clear_bit(channel, &mcu); | 381 | mcu &= ~(1 << channel); |
389 | else | 382 | else |
390 | __set_bit(channel, &mcu); | 383 | mcu |= (1 << channel); |
391 | 384 | ||
392 | writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR); | 385 | __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); |
393 | writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR); | 386 | __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); |
394 | writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR); | 387 | __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); |
395 | 388 | ||
396 | return 0; | 389 | return 0; |
397 | } | 390 | } |
398 | 391 | ||
399 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
400 | { | ||
401 | writel(BIT(channel), sdma->regs + SDMA_H_START); | ||
402 | } | ||
403 | |||
404 | /* | 392 | /* |
405 | * sdma_run_channel0 - run a channel and wait till it's done | 393 | * sdma_run_channel - run a channel and wait till it's done |
406 | */ | 394 | */ |
407 | static int sdma_run_channel0(struct sdma_engine *sdma) | 395 | static int sdma_run_channel(struct sdma_channel *sdmac) |
408 | { | 396 | { |
397 | struct sdma_engine *sdma = sdmac->sdma; | ||
398 | int channel = sdmac->channel; | ||
409 | int ret; | 399 | int ret; |
410 | unsigned long timeout = 500; | ||
411 | 400 | ||
412 | sdma_enable_channel(sdma, 0); | 401 | init_completion(&sdmac->done); |
413 | 402 | ||
414 | while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) { | 403 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); |
415 | if (timeout-- <= 0) | ||
416 | break; | ||
417 | udelay(1); | ||
418 | } | ||
419 | 404 | ||
420 | if (ret) { | 405 | ret = wait_for_completion_timeout(&sdmac->done, HZ); |
421 | /* Clear the interrupt status */ | ||
422 | writel_relaxed(ret, sdma->regs + SDMA_H_INTR); | ||
423 | } else { | ||
424 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); | ||
425 | } | ||
426 | 406 | ||
427 | return ret ? 0 : -ETIMEDOUT; | 407 | return ret ? 0 : -ETIMEDOUT; |
428 | } | 408 | } |
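Editor's note on the hunk above: the sdma_run_channel0() variant waits for channel 0 by busy-polling the SDMA_H_INTR status bit in a bounded udelay() loop, while the sdma_run_channel() variant sleeps on a completion instead. Below is a minimal user-space model of the bounded-poll pattern only; the register accessors, the timeout constant and the function names are simulated stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Simulated SDMA_H_INTR word; bit 0 means "channel 0 finished". */
static volatile uint32_t intr_status;

static uint32_t read_intr(void)           /* stands in for readl_relaxed() */
{
    return intr_status;
}

static void ack_intr(uint32_t bits)       /* write-1-to-clear on real hardware */
{
    intr_status &= ~bits;
}

/* Poll bit 0 for up to 'timeout' iterations, one (simulated) microsecond each. */
static int poll_channel0_done(unsigned long timeout)
{
    uint32_t ret;

    while (!((ret = read_intr() & 1))) {
        if (timeout-- == 0)
            return -1;                    /* -ETIMEDOUT in the driver */
        /* udelay(1) would go here */
    }

    ack_intr(ret);                        /* clear the channel 0 status bit */
    return 0;
}

int main(void)
{
    intr_status = 1;                      /* pretend the channel 0 script completed */
    printf("%d\n", poll_channel0_done(500));
    return 0;
}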
@@ -434,16 +414,12 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | |||
434 | void *buf_virt; | 414 | void *buf_virt; |
435 | dma_addr_t buf_phys; | 415 | dma_addr_t buf_phys; |
436 | int ret; | 416 | int ret; |
437 | unsigned long flags; | ||
438 | 417 | ||
439 | buf_virt = dma_alloc_coherent(NULL, | 418 | buf_virt = dma_alloc_coherent(NULL, |
440 | size, | 419 | size, |
441 | &buf_phys, GFP_KERNEL); | 420 | &buf_phys, GFP_KERNEL); |
442 | if (!buf_virt) { | 421 | if (!buf_virt) |
443 | return -ENOMEM; | 422 | return -ENOMEM; |
444 | } | ||
445 | |||
446 | spin_lock_irqsave(&sdma->channel_0_lock, flags); | ||
447 | 423 | ||
448 | bd0->mode.command = C0_SETPM; | 424 | bd0->mode.command = C0_SETPM; |
449 | bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; | 425 | bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; |
@@ -453,9 +429,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | |||
453 | 429 | ||
454 | memcpy(buf_virt, buf, size); | 430 | memcpy(buf_virt, buf, size); |
455 | 431 | ||
456 | ret = sdma_run_channel0(sdma); | 432 | ret = sdma_run_channel(&sdma->channel[0]); |
457 | |||
458 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); | ||
459 | 433 | ||
460 | dma_free_coherent(NULL, size, buf_virt, buf_phys); | 434 | dma_free_coherent(NULL, size, buf_virt, buf_phys); |
461 | 435 | ||
@@ -466,12 +440,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) | |||
466 | { | 440 | { |
467 | struct sdma_engine *sdma = sdmac->sdma; | 441 | struct sdma_engine *sdma = sdmac->sdma; |
468 | int channel = sdmac->channel; | 442 | int channel = sdmac->channel; |
469 | unsigned long val; | 443 | u32 val; |
470 | u32 chnenbl = chnenbl_ofs(sdma, event); | 444 | u32 chnenbl = chnenbl_ofs(sdma, event); |
471 | 445 | ||
472 | val = readl_relaxed(sdma->regs + chnenbl); | 446 | val = __raw_readl(sdma->regs + chnenbl); |
473 | __set_bit(channel, &val); | 447 | val |= (1 << channel); |
474 | writel_relaxed(val, sdma->regs + chnenbl); | 448 | __raw_writel(val, sdma->regs + chnenbl); |
475 | } | 449 | } |
476 | 450 | ||
477 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | 451 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) |
@@ -479,11 +453,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | |||
479 | struct sdma_engine *sdma = sdmac->sdma; | 453 | struct sdma_engine *sdma = sdmac->sdma; |
480 | int channel = sdmac->channel; | 454 | int channel = sdmac->channel; |
481 | u32 chnenbl = chnenbl_ofs(sdma, event); | 455 | u32 chnenbl = chnenbl_ofs(sdma, event); |
482 | unsigned long val; | 456 | u32 val; |
483 | 457 | ||
484 | val = readl_relaxed(sdma->regs + chnenbl); | 458 | val = __raw_readl(sdma->regs + chnenbl); |
485 | __clear_bit(channel, &val); | 459 | val &= ~(1 << channel); |
486 | writel_relaxed(val, sdma->regs + chnenbl); | 460 | __raw_writel(val, sdma->regs + chnenbl); |
487 | } | 461 | } |
488 | 462 | ||
489 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | 463 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) |
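Editor's note on the sdma_event_enable()/sdma_event_disable() hunk above: both sides perform the same read-modify-write on the per-event channel-enable register (read the 32-bit word, flip the channel's bit, write it back) and differ only in using __set_bit()/__clear_bit() on an unsigned long versus open-coded shifts. A compile-able sketch of that pattern over an in-memory register bank follows; the array size and helper names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define NUM_EVENTS 48    /* illustrative; the real count is per-SoC */

/* In-memory stand-in for the CHNENBLn register bank, one word per event. */
static uint32_t chnenbl[NUM_EVENTS];

static void event_enable(unsigned int event, unsigned int channel)
{
    uint32_t val = chnenbl[event];    /* readl_relaxed() in the driver  */

    val |= 1u << channel;             /* __set_bit(channel, &val)       */
    chnenbl[event] = val;             /* writel_relaxed() in the driver */
}

static void event_disable(unsigned int event, unsigned int channel)
{
    uint32_t val = chnenbl[event];

    val &= ~(1u << channel);          /* __clear_bit(channel, &val)     */
    chnenbl[event] = val;
}

int main(void)
{
    event_enable(5, 3);
    event_enable(5, 7);
    event_disable(5, 3);
    printf("0x%08x\n", (unsigned int)chnenbl[5]);    /* 0x00000080 */
    return 0;
}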
@@ -519,7 +493,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
519 | struct sdma_buffer_descriptor *bd; | 493 | struct sdma_buffer_descriptor *bd; |
520 | int i, error = 0; | 494 | int i, error = 0; |
521 | 495 | ||
522 | sdmac->chn_real_count = 0; | ||
523 | /* | 496 | /* |
524 | * non loop mode. Iterate over all descriptors, collect | 497 | * non loop mode. Iterate over all descriptors, collect |
525 | * errors and call callback function | 498 | * errors and call callback function |
@@ -529,7 +502,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
529 | 502 | ||
530 | if (bd->mode.status & (BD_DONE | BD_RROR)) | 503 | if (bd->mode.status & (BD_DONE | BD_RROR)) |
531 | error = -EIO; | 504 | error = -EIO; |
532 | sdmac->chn_real_count += bd->mode.count; | ||
533 | } | 505 | } |
534 | 506 | ||
535 | if (error) | 507 | if (error) |
@@ -537,17 +509,19 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
537 | else | 509 | else |
538 | sdmac->status = DMA_SUCCESS; | 510 | sdmac->status = DMA_SUCCESS; |
539 | 511 | ||
540 | dma_cookie_complete(&sdmac->desc); | ||
541 | if (sdmac->desc.callback) | 512 | if (sdmac->desc.callback) |
542 | sdmac->desc.callback(sdmac->desc.callback_param); | 513 | sdmac->desc.callback(sdmac->desc.callback_param); |
514 | sdmac->last_completed = sdmac->desc.cookie; | ||
543 | } | 515 | } |
544 | 516 | ||
545 | static void sdma_tasklet(unsigned long data) | 517 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) |
546 | { | 518 | { |
547 | struct sdma_channel *sdmac = (struct sdma_channel *) data; | ||
548 | |||
549 | complete(&sdmac->done); | 519 | complete(&sdmac->done); |
550 | 520 | ||
521 | /* not interested in channel 0 interrupts */ | ||
522 | if (sdmac->channel == 0) | ||
523 | return; | ||
524 | |||
551 | if (sdmac->flags & IMX_DMA_SG_LOOP) | 525 | if (sdmac->flags & IMX_DMA_SG_LOOP) |
552 | sdma_handle_channel_loop(sdmac); | 526 | sdma_handle_channel_loop(sdmac); |
553 | else | 527 | else |
@@ -557,20 +531,18 @@ static void sdma_tasklet(unsigned long data) | |||
557 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) | 531 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) |
558 | { | 532 | { |
559 | struct sdma_engine *sdma = dev_id; | 533 | struct sdma_engine *sdma = dev_id; |
560 | unsigned long stat; | 534 | u32 stat; |
561 | 535 | ||
562 | stat = readl_relaxed(sdma->regs + SDMA_H_INTR); | 536 | stat = __raw_readl(sdma->regs + SDMA_H_INTR); |
563 | /* not interested in channel 0 interrupts */ | 537 | __raw_writel(stat, sdma->regs + SDMA_H_INTR); |
564 | stat &= ~1; | ||
565 | writel_relaxed(stat, sdma->regs + SDMA_H_INTR); | ||
566 | 538 | ||
567 | while (stat) { | 539 | while (stat) { |
568 | int channel = fls(stat) - 1; | 540 | int channel = fls(stat) - 1; |
569 | struct sdma_channel *sdmac = &sdma->channel[channel]; | 541 | struct sdma_channel *sdmac = &sdma->channel[channel]; |
570 | 542 | ||
571 | tasklet_schedule(&sdmac->tasklet); | 543 | mxc_sdma_handle_channel(sdmac); |
572 | 544 | ||
573 | __clear_bit(channel, &stat); | 545 | stat &= ~(1 << channel); |
574 | } | 546 | } |
575 | 547 | ||
576 | return IRQ_HANDLED; | 548 | return IRQ_HANDLED; |
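Editor's note on the interrupt-handler hunk above: both versions drain the pending-status word one channel at a time, always taking the highest set bit via fls() and clearing it before looping. A self-contained model of that dispatch loop is sketched below; fls32() mimics the kernel's fls() contract, and handle_channel() stands in for the per-channel handling (tasklet_schedule() on one side of the hunk, a direct call on the other).

#include <stdint.h>
#include <stdio.h>

/* Highest set bit, 1-based, 0 for no bits -- same contract as the kernel's fls(). */
static int fls32(uint32_t x)
{
    return x ? 32 - __builtin_clz(x) : 0;
}

static void handle_channel(int channel)
{
    printf("servicing channel %d\n", channel);
}

/* Dispatch every channel whose bit is set in 'stat', highest channel first. */
static void dispatch(uint32_t stat)
{
    stat &= ~1u;                          /* one side of the hunk skips channel 0 here */

    while (stat) {
        int channel = fls32(stat) - 1;

        handle_channel(channel);
        stat &= ~(1u << channel);         /* __clear_bit(channel, &stat) */
    }
}

int main(void)
{
    dispatch(0x0000802a);                 /* pending channels 15, 5, 3 and 1 */
    return 0;
}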
@@ -667,9 +639,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
667 | struct sdma_context_data *context = sdma->context; | 639 | struct sdma_context_data *context = sdma->context; |
668 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | 640 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; |
669 | int ret; | 641 | int ret; |
670 | unsigned long flags; | ||
671 | 642 | ||
672 | if (sdmac->direction == DMA_DEV_TO_MEM) { | 643 | if (sdmac->direction == DMA_FROM_DEVICE) { |
673 | load_address = sdmac->pc_from_device; | 644 | load_address = sdmac->pc_from_device; |
674 | } else { | 645 | } else { |
675 | load_address = sdmac->pc_to_device; | 646 | load_address = sdmac->pc_to_device; |
@@ -679,13 +650,11 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
679 | return load_address; | 650 | return load_address; |
680 | 651 | ||
681 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); | 652 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); |
682 | dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level); | 653 | dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); |
683 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); | 654 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); |
684 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); | 655 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); |
685 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]); | 656 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); |
686 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]); | 657 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); |
687 | |||
688 | spin_lock_irqsave(&sdma->channel_0_lock, flags); | ||
689 | 658 | ||
690 | memset(context, 0, sizeof(*context)); | 659 | memset(context, 0, sizeof(*context)); |
691 | context->channel_state.pc = load_address; | 660 | context->channel_state.pc = load_address; |
@@ -693,8 +662,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
693 | /* Send by context the event mask,base address for peripheral | 662 | /* Send by context the event mask,base address for peripheral |
694 | * and watermark level | 663 | * and watermark level |
695 | */ | 664 | */ |
696 | context->gReg[0] = sdmac->event_mask[1]; | 665 | context->gReg[0] = sdmac->event_mask1; |
697 | context->gReg[1] = sdmac->event_mask[0]; | 666 | context->gReg[1] = sdmac->event_mask0; |
698 | context->gReg[2] = sdmac->per_addr; | 667 | context->gReg[2] = sdmac->per_addr; |
699 | context->gReg[6] = sdmac->shp_addr; | 668 | context->gReg[6] = sdmac->shp_addr; |
700 | context->gReg[7] = sdmac->watermark_level; | 669 | context->gReg[7] = sdmac->watermark_level; |
@@ -704,9 +673,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
704 | bd0->mode.count = sizeof(*context) / 4; | 673 | bd0->mode.count = sizeof(*context) / 4; |
705 | bd0->buffer_addr = sdma->context_phys; | 674 | bd0->buffer_addr = sdma->context_phys; |
706 | bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; | 675 | bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; |
707 | ret = sdma_run_channel0(sdma); | ||
708 | 676 | ||
709 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); | 677 | ret = sdma_run_channel(&sdma->channel[0]); |
710 | 678 | ||
711 | return ret; | 679 | return ret; |
712 | } | 680 | } |
@@ -716,7 +684,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac) | |||
716 | struct sdma_engine *sdma = sdmac->sdma; | 684 | struct sdma_engine *sdma = sdmac->sdma; |
717 | int channel = sdmac->channel; | 685 | int channel = sdmac->channel; |
718 | 686 | ||
719 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); | 687 | __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); |
720 | sdmac->status = DMA_ERROR; | 688 | sdmac->status = DMA_ERROR; |
721 | } | 689 | } |
722 | 690 | ||
@@ -726,13 +694,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
726 | 694 | ||
727 | sdma_disable_channel(sdmac); | 695 | sdma_disable_channel(sdmac); |
728 | 696 | ||
729 | sdmac->event_mask[0] = 0; | 697 | sdmac->event_mask0 = 0; |
730 | sdmac->event_mask[1] = 0; | 698 | sdmac->event_mask1 = 0; |
731 | sdmac->shp_addr = 0; | 699 | sdmac->shp_addr = 0; |
732 | sdmac->per_addr = 0; | 700 | sdmac->per_addr = 0; |
733 | 701 | ||
734 | if (sdmac->event_id0) { | 702 | if (sdmac->event_id0) { |
735 | if (sdmac->event_id0 >= sdmac->sdma->num_events) | 703 | if (sdmac->event_id0 > 32) |
736 | return -EINVAL; | 704 | return -EINVAL; |
737 | sdma_event_enable(sdmac, sdmac->event_id0); | 705 | sdma_event_enable(sdmac, sdmac->event_id0); |
738 | } | 706 | } |
@@ -755,14 +723,15 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
755 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { | 723 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { |
756 | /* Handle multiple event channels differently */ | 724 | /* Handle multiple event channels differently */ |
757 | if (sdmac->event_id1) { | 725 | if (sdmac->event_id1) { |
758 | sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32); | 726 | sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); |
759 | if (sdmac->event_id1 > 31) | 727 | if (sdmac->event_id1 > 31) |
760 | __set_bit(31, &sdmac->watermark_level); | 728 | sdmac->watermark_level |= 1 << 31; |
761 | sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32); | 729 | sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); |
762 | if (sdmac->event_id0 > 31) | 730 | if (sdmac->event_id0 > 31) |
763 | __set_bit(30, &sdmac->watermark_level); | 731 | sdmac->watermark_level |= 1 << 30; |
764 | } else { | 732 | } else { |
765 | __set_bit(sdmac->event_id0, sdmac->event_mask); | 733 | sdmac->event_mask0 = 1 << sdmac->event_id0; |
734 | sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); | ||
766 | } | 735 | } |
767 | /* Watermark Level */ | 736 | /* Watermark Level */ |
768 | sdmac->watermark_level |= sdmac->watermark_level; | 737 | sdmac->watermark_level |= sdmac->watermark_level; |
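Editor's note on the sdma_config_channel() hunk above: each DMA request contributes bit (event_id % 32) to one of two 32-bit event-mask words, and requests above 31 additionally set bit 30 or 31 of the watermark word, presumably so the SDMA script can tell which register half each request lives in. The mask arithmetic in isolation (helper name and example request numbers are made up):

#include <stdint.h>
#include <stdio.h>

/*
 * Two 32-bit event-mask words cover DMA request lines 0..63: requests 0..31
 * land in mask0 and 32..63 in mask1, each as bit (event_id % 32).
 */
static void build_event_masks(unsigned int event_id0, unsigned int event_id1,
                              uint32_t *mask0, uint32_t *mask1)
{
    *mask0 = 1u << (event_id0 % 32);
    *mask1 = 1u << (event_id1 % 32);
}

int main(void)
{
    uint32_t mask0, mask1;

    build_event_masks(14, 47, &mask0, &mask1);    /* requests 14 and 47 */
    printf("mask0=0x%08x mask1=0x%08x\n",
           (unsigned int)mask0, (unsigned int)mask1);    /* 0x00004000 / 0x00008000 */
    return 0;
}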
@@ -788,7 +757,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac, | |||
788 | return -EINVAL; | 757 | return -EINVAL; |
789 | } | 758 | } |
790 | 759 | ||
791 | writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); | 760 | __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); |
792 | 761 | ||
793 | return 0; | 762 | return 0; |
794 | } | 763 | } |
@@ -810,16 +779,38 @@ static int sdma_request_channel(struct sdma_channel *sdmac) | |||
810 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; | 779 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; |
811 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | 780 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; |
812 | 781 | ||
782 | clk_enable(sdma->clk); | ||
783 | |||
813 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); | 784 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); |
814 | 785 | ||
815 | init_completion(&sdmac->done); | 786 | init_completion(&sdmac->done); |
816 | 787 | ||
788 | sdmac->buf_tail = 0; | ||
789 | |||
817 | return 0; | 790 | return 0; |
818 | out: | 791 | out: |
819 | 792 | ||
820 | return ret; | 793 | return ret; |
821 | } | 794 | } |
822 | 795 | ||
796 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
797 | { | ||
798 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
799 | } | ||
800 | |||
801 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) | ||
802 | { | ||
803 | dma_cookie_t cookie = sdmac->chan.cookie; | ||
804 | |||
805 | if (++cookie < 0) | ||
806 | cookie = 1; | ||
807 | |||
808 | sdmac->chan.cookie = cookie; | ||
809 | sdmac->desc.cookie = cookie; | ||
810 | |||
811 | return cookie; | ||
812 | } | ||
813 | |||
823 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | 814 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) |
824 | { | 815 | { |
825 | return container_of(chan, struct sdma_channel, chan); | 816 | return container_of(chan, struct sdma_channel, chan); |
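Editor's note on the hunk above: sdma_assign_cookie() open-codes the cookie bookkeeping that the dmaengine core's dma_cookie_assign() helper (seen in the neighbouring tx_submit hunk) provides — hand out monotonically increasing, strictly positive cookies, restarting at 1 instead of going negative on wrap. A stand-alone sketch of that rule; the struct and function names here are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

typedef int32_t dma_cookie_t;

/* Per-channel cookie state, mirroring chan.cookie / desc.cookie in the driver. */
struct chan_state {
    dma_cookie_t chan_cookie;    /* last cookie handed out             */
    dma_cookie_t desc_cookie;    /* cookie of the submitted descriptor */
};

/* Next strictly positive cookie; a wrap past the signed maximum restarts at 1. */
static dma_cookie_t assign_cookie(struct chan_state *c)
{
    dma_cookie_t cookie = c->chan_cookie;

    if (++cookie < 0)
        cookie = 1;

    c->chan_cookie = cookie;
    c->desc_cookie = cookie;

    return cookie;
}

int main(void)
{
    struct chan_state c = { 0, 0 };

    printf("%d\n", (int)assign_cookie(&c));    /* 1 */
    printf("%d\n", (int)assign_cookie(&c));    /* 2 */
    return 0;
}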
@@ -827,15 +818,17 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | |||
827 | 818 | ||
828 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | 819 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) |
829 | { | 820 | { |
830 | unsigned long flags; | ||
831 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | 821 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); |
822 | struct sdma_engine *sdma = sdmac->sdma; | ||
832 | dma_cookie_t cookie; | 823 | dma_cookie_t cookie; |
833 | 824 | ||
834 | spin_lock_irqsave(&sdmac->lock, flags); | 825 | spin_lock_irq(&sdmac->lock); |
835 | 826 | ||
836 | cookie = dma_cookie_assign(tx); | 827 | cookie = sdma_assign_cookie(sdmac); |
837 | 828 | ||
838 | spin_unlock_irqrestore(&sdmac->lock, flags); | 829 | sdma_enable_channel(sdma, sdmac->channel); |
830 | |||
831 | spin_unlock_irq(&sdmac->lock); | ||
839 | 832 | ||
840 | return cookie; | 833 | return cookie; |
841 | } | 834 | } |
@@ -864,15 +857,11 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
864 | 857 | ||
865 | sdmac->peripheral_type = data->peripheral_type; | 858 | sdmac->peripheral_type = data->peripheral_type; |
866 | sdmac->event_id0 = data->dma_request; | 859 | sdmac->event_id0 = data->dma_request; |
867 | 860 | ret = sdma_set_channel_priority(sdmac, prio); | |
868 | clk_enable(sdmac->sdma->clk_ipg); | ||
869 | clk_enable(sdmac->sdma->clk_ahb); | ||
870 | |||
871 | ret = sdma_request_channel(sdmac); | ||
872 | if (ret) | 861 | if (ret) |
873 | return ret; | 862 | return ret; |
874 | 863 | ||
875 | ret = sdma_set_channel_priority(sdmac, prio); | 864 | ret = sdma_request_channel(sdmac); |
876 | if (ret) | 865 | if (ret) |
877 | return ret; | 866 | return ret; |
878 | 867 | ||
@@ -903,14 +892,13 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
903 | 892 | ||
904 | dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); | 893 | dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); |
905 | 894 | ||
906 | clk_disable(sdma->clk_ipg); | 895 | clk_disable(sdma->clk); |
907 | clk_disable(sdma->clk_ahb); | ||
908 | } | 896 | } |
909 | 897 | ||
910 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | 898 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( |
911 | struct dma_chan *chan, struct scatterlist *sgl, | 899 | struct dma_chan *chan, struct scatterlist *sgl, |
912 | unsigned int sg_len, enum dma_transfer_direction direction, | 900 | unsigned int sg_len, enum dma_data_direction direction, |
913 | unsigned long flags, void *context) | 901 | unsigned long flags) |
914 | { | 902 | { |
915 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 903 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
916 | struct sdma_engine *sdma = sdmac->sdma; | 904 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -924,8 +912,6 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
924 | 912 | ||
925 | sdmac->flags = 0; | 913 | sdmac->flags = 0; |
926 | 914 | ||
927 | sdmac->buf_tail = 0; | ||
928 | |||
929 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", | 915 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", |
930 | sg_len, channel); | 916 | sg_len, channel); |
931 | 917 | ||
@@ -941,14 +927,13 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
941 | goto err_out; | 927 | goto err_out; |
942 | } | 928 | } |
943 | 929 | ||
944 | sdmac->chn_count = 0; | ||
945 | for_each_sg(sgl, sg, sg_len, i) { | 930 | for_each_sg(sgl, sg, sg_len, i) { |
946 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | 931 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; |
947 | int param; | 932 | int param; |
948 | 933 | ||
949 | bd->buffer_addr = sg->dma_address; | 934 | bd->buffer_addr = sg->dma_address; |
950 | 935 | ||
951 | count = sg_dma_len(sg); | 936 | count = sg->length; |
952 | 937 | ||
953 | if (count > 0xffff) { | 938 | if (count > 0xffff) { |
954 | dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", | 939 | dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", |
@@ -958,7 +943,6 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
958 | } | 943 | } |
959 | 944 | ||
960 | bd->mode.count = count; | 945 | bd->mode.count = count; |
961 | sdmac->chn_count += count; | ||
962 | 946 | ||
963 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { | 947 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { |
964 | ret = -EINVAL; | 948 | ret = -EINVAL; |
@@ -1010,8 +994,7 @@ err_out: | |||
1010 | 994 | ||
1011 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | 995 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( |
1012 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 996 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
1013 | size_t period_len, enum dma_transfer_direction direction, | 997 | size_t period_len, enum dma_data_direction direction) |
1014 | unsigned long flags, void *context) | ||
1015 | { | 998 | { |
1016 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 999 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1017 | struct sdma_engine *sdma = sdmac->sdma; | 1000 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1026,8 +1009,6 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
1026 | 1009 | ||
1027 | sdmac->status = DMA_IN_PROGRESS; | 1010 | sdmac->status = DMA_IN_PROGRESS; |
1028 | 1011 | ||
1029 | sdmac->buf_tail = 0; | ||
1030 | |||
1031 | sdmac->flags |= IMX_DMA_SG_LOOP; | 1012 | sdmac->flags |= IMX_DMA_SG_LOOP; |
1032 | sdmac->direction = direction; | 1013 | sdmac->direction = direction; |
1033 | ret = sdma_load_context(sdmac); | 1014 | ret = sdma_load_context(sdmac); |
@@ -1098,18 +1079,15 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1098 | sdma_disable_channel(sdmac); | 1079 | sdma_disable_channel(sdmac); |
1099 | return 0; | 1080 | return 0; |
1100 | case DMA_SLAVE_CONFIG: | 1081 | case DMA_SLAVE_CONFIG: |
1101 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | 1082 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { |
1102 | sdmac->per_address = dmaengine_cfg->src_addr; | 1083 | sdmac->per_address = dmaengine_cfg->src_addr; |
1103 | sdmac->watermark_level = dmaengine_cfg->src_maxburst * | 1084 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; |
1104 | dmaengine_cfg->src_addr_width; | ||
1105 | sdmac->word_size = dmaengine_cfg->src_addr_width; | 1085 | sdmac->word_size = dmaengine_cfg->src_addr_width; |
1106 | } else { | 1086 | } else { |
1107 | sdmac->per_address = dmaengine_cfg->dst_addr; | 1087 | sdmac->per_address = dmaengine_cfg->dst_addr; |
1108 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst * | 1088 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; |
1109 | dmaengine_cfg->dst_addr_width; | ||
1110 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | 1089 | sdmac->word_size = dmaengine_cfg->dst_addr_width; |
1111 | } | 1090 | } |
1112 | sdmac->direction = dmaengine_cfg->direction; | ||
1113 | return sdma_config_channel(sdmac); | 1091 | return sdma_config_channel(sdmac); |
1114 | default: | 1092 | default: |
1115 | return -ENOSYS; | 1093 | return -ENOSYS; |
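Editor's note on the DMA_SLAVE_CONFIG hunk above: the substantive difference is the watermark calculation — one side stores the raw maxburst count, the other scales it by the address width so the watermark is expressed in bytes. The arithmetic, isolated with made-up example values (field names follow struct dma_slave_config):

#include <stdio.h>

int main(void)
{
    /* Example slave configuration: bursts of 16 words, 4 bytes per word. */
    unsigned int src_maxburst = 16;      /* burst length in words */
    unsigned int src_addr_width = 4;     /* bus width in bytes    */

    unsigned int wml_words = src_maxburst;                     /* one side of the hunk */
    unsigned int wml_bytes = src_maxburst * src_addr_width;    /* the other side       */

    printf("watermark: %u words = %u bytes\n", wml_words, wml_bytes);
    return 0;
}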
@@ -1127,19 +1105,16 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1127 | 1105 | ||
1128 | last_used = chan->cookie; | 1106 | last_used = chan->cookie; |
1129 | 1107 | ||
1130 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, | 1108 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); |
1131 | sdmac->chn_count - sdmac->chn_real_count); | ||
1132 | 1109 | ||
1133 | return sdmac->status; | 1110 | return sdmac->status; |
1134 | } | 1111 | } |
1135 | 1112 | ||
1136 | static void sdma_issue_pending(struct dma_chan *chan) | 1113 | static void sdma_issue_pending(struct dma_chan *chan) |
1137 | { | 1114 | { |
1138 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1115 | /* |
1139 | struct sdma_engine *sdma = sdmac->sdma; | 1116 | * Nothing to do. We only have a single descriptor |
1140 | 1117 | */ | |
1141 | if (sdmac->status == DMA_IN_PROGRESS) | ||
1142 | sdma_enable_channel(sdma, sdmac->channel); | ||
1143 | } | 1118 | } |
1144 | 1119 | ||
1145 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 | 1120 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 |
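Editor's note on the two sdma_issue_pending() bodies above: they reflect two submission models — either tx_submit() starts the channel immediately (leaving issue_pending() a no-op), or tx_submit() only queues work and issue_pending() is what finally writes SDMA_H_START. A toy model of the deferred-start flow, with invented names and no real hardware access:

#include <stdbool.h>
#include <stdio.h>

struct channel {
    int  id;
    bool in_progress;    /* descriptor submitted, hardware not yet kicked */
    bool started;        /* start bit written                             */
};

/* tx_submit(): record the pending work, but do not touch the hardware. */
static void submit(struct channel *ch)
{
    ch->in_progress = true;
}

/* issue_pending(): only now start the channel, if something was submitted. */
static void issue_pending(struct channel *ch)
{
    if (ch->in_progress && !ch->started) {
        ch->started = true;    /* writel(BIT(id), regs + SDMA_H_START) in the driver */
        printf("channel %d started\n", ch->id);
    }
}

int main(void)
{
    struct channel ch = { .id = 2 };

    submit(&ch);           /* nothing runs yet      */
    issue_pending(&ch);    /* hardware kicked here  */
    return 0;
}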
@@ -1156,17 +1131,18 @@ static void sdma_add_scripts(struct sdma_engine *sdma, | |||
1156 | saddr_arr[i] = addr_arr[i]; | 1131 | saddr_arr[i] = addr_arr[i]; |
1157 | } | 1132 | } |
1158 | 1133 | ||
1159 | static void sdma_load_firmware(const struct firmware *fw, void *context) | 1134 | static int __init sdma_get_firmware(struct sdma_engine *sdma, |
1135 | const char *fw_name) | ||
1160 | { | 1136 | { |
1161 | struct sdma_engine *sdma = context; | 1137 | const struct firmware *fw; |
1162 | const struct sdma_firmware_header *header; | 1138 | const struct sdma_firmware_header *header; |
1139 | int ret; | ||
1163 | const struct sdma_script_start_addrs *addr; | 1140 | const struct sdma_script_start_addrs *addr; |
1164 | unsigned short *ram_code; | 1141 | unsigned short *ram_code; |
1165 | 1142 | ||
1166 | if (!fw) { | 1143 | ret = request_firmware(&fw, fw_name, sdma->dev); |
1167 | dev_err(sdma->dev, "firmware not found\n"); | 1144 | if (ret) |
1168 | return; | 1145 | return ret; |
1169 | } | ||
1170 | 1146 | ||
1171 | if (fw->size < sizeof(*header)) | 1147 | if (fw->size < sizeof(*header)) |
1172 | goto err_firmware; | 1148 | goto err_firmware; |
@@ -1181,14 +1157,12 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) | |||
1181 | addr = (void *)header + header->script_addrs_start; | 1157 | addr = (void *)header + header->script_addrs_start; |
1182 | ram_code = (void *)header + header->ram_code_start; | 1158 | ram_code = (void *)header + header->ram_code_start; |
1183 | 1159 | ||
1184 | clk_enable(sdma->clk_ipg); | 1160 | clk_enable(sdma->clk); |
1185 | clk_enable(sdma->clk_ahb); | ||
1186 | /* download the RAM image for SDMA */ | 1161 | /* download the RAM image for SDMA */ |
1187 | sdma_load_script(sdma, ram_code, | 1162 | sdma_load_script(sdma, ram_code, |
1188 | header->ram_code_size, | 1163 | header->ram_code_size, |
1189 | addr->ram_code_start_addr); | 1164 | addr->ram_code_start_addr); |
1190 | clk_disable(sdma->clk_ipg); | 1165 | clk_disable(sdma->clk); |
1191 | clk_disable(sdma->clk_ahb); | ||
1192 | 1166 | ||
1193 | sdma_add_scripts(sdma, addr); | 1167 | sdma_add_scripts(sdma, addr); |
1194 | 1168 | ||
@@ -1198,16 +1172,6 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) | |||
1198 | 1172 | ||
1199 | err_firmware: | 1173 | err_firmware: |
1200 | release_firmware(fw); | 1174 | release_firmware(fw); |
1201 | } | ||
1202 | |||
1203 | static int __init sdma_get_firmware(struct sdma_engine *sdma, | ||
1204 | const char *fw_name) | ||
1205 | { | ||
1206 | int ret; | ||
1207 | |||
1208 | ret = request_firmware_nowait(THIS_MODULE, | ||
1209 | FW_ACTION_HOTPLUG, fw_name, sdma->dev, | ||
1210 | GFP_KERNEL, sdma, sdma_load_firmware); | ||
1211 | 1175 | ||
1212 | return ret; | 1176 | return ret; |
1213 | } | 1177 | } |
@@ -1230,11 +1194,10 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1230 | return -ENODEV; | 1194 | return -ENODEV; |
1231 | } | 1195 | } |
1232 | 1196 | ||
1233 | clk_enable(sdma->clk_ipg); | 1197 | clk_enable(sdma->clk); |
1234 | clk_enable(sdma->clk_ahb); | ||
1235 | 1198 | ||
1236 | /* Be sure SDMA has not started yet */ | 1199 | /* Be sure SDMA has not started yet */ |
1237 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); | 1200 | __raw_writel(0, sdma->regs + SDMA_H_C0PTR); |
1238 | 1201 | ||
1239 | sdma->channel_control = dma_alloc_coherent(NULL, | 1202 | sdma->channel_control = dma_alloc_coherent(NULL, |
1240 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | 1203 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + |
@@ -1257,11 +1220,11 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1257 | 1220 | ||
1258 | /* disable all channels */ | 1221 | /* disable all channels */ |
1259 | for (i = 0; i < sdma->num_events; i++) | 1222 | for (i = 0; i < sdma->num_events; i++) |
1260 | writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); | 1223 | __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); |
1261 | 1224 | ||
1262 | /* All channels have priority 0 */ | 1225 | /* All channels have priority 0 */ |
1263 | for (i = 0; i < MAX_DMA_CHANNELS; i++) | 1226 | for (i = 0; i < MAX_DMA_CHANNELS; i++) |
1264 | writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); | 1227 | __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); |
1265 | 1228 | ||
1266 | ret = sdma_request_channel(&sdma->channel[0]); | 1229 | ret = sdma_request_channel(&sdma->channel[0]); |
1267 | if (ret) | 1230 | if (ret) |
@@ -1270,28 +1233,26 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1270 | sdma_config_ownership(&sdma->channel[0], false, true, false); | 1233 | sdma_config_ownership(&sdma->channel[0], false, true, false); |
1271 | 1234 | ||
1272 | /* Set Command Channel (Channel Zero) */ | 1235 | /* Set Command Channel (Channel Zero) */ |
1273 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); | 1236 | __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); |
1274 | 1237 | ||
1275 | /* Set bits of CONFIG register but with static context switching */ | 1238 | /* Set bits of CONFIG register but with static context switching */ |
1276 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | 1239 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ |
1277 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); | 1240 | __raw_writel(0, sdma->regs + SDMA_H_CONFIG); |
1278 | 1241 | ||
1279 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1242 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1280 | 1243 | ||
1281 | /* Set bits of CONFIG register with given context switching mode */ | 1244 | /* Set bits of CONFIG register with given context switching mode */ |
1282 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | 1245 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); |
1283 | 1246 | ||
1284 | /* Initializes channel's priorities */ | 1247 | /* Initializes channel's priorities */ |
1285 | sdma_set_channel_priority(&sdma->channel[0], 7); | 1248 | sdma_set_channel_priority(&sdma->channel[0], 7); |
1286 | 1249 | ||
1287 | clk_disable(sdma->clk_ipg); | 1250 | clk_disable(sdma->clk); |
1288 | clk_disable(sdma->clk_ahb); | ||
1289 | 1251 | ||
1290 | return 0; | 1252 | return 0; |
1291 | 1253 | ||
1292 | err_dma_alloc: | 1254 | err_dma_alloc: |
1293 | clk_disable(sdma->clk_ipg); | 1255 | clk_disable(sdma->clk); |
1294 | clk_disable(sdma->clk_ahb); | ||
1295 | dev_err(sdma->dev, "initialisation failed with %d\n", ret); | 1256 | dev_err(sdma->dev, "initialisation failed with %d\n", ret); |
1296 | return ret; | 1257 | return ret; |
1297 | } | 1258 | } |
@@ -1308,14 +1269,11 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1308 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | 1269 | struct sdma_platform_data *pdata = pdev->dev.platform_data; |
1309 | int i; | 1270 | int i; |
1310 | struct sdma_engine *sdma; | 1271 | struct sdma_engine *sdma; |
1311 | s32 *saddr_arr; | ||
1312 | 1272 | ||
1313 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | 1273 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); |
1314 | if (!sdma) | 1274 | if (!sdma) |
1315 | return -ENOMEM; | 1275 | return -ENOMEM; |
1316 | 1276 | ||
1317 | spin_lock_init(&sdma->channel_0_lock); | ||
1318 | |||
1319 | sdma->dev = &pdev->dev; | 1277 | sdma->dev = &pdev->dev; |
1320 | 1278 | ||
1321 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1279 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -1330,21 +1288,12 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1330 | goto err_request_region; | 1288 | goto err_request_region; |
1331 | } | 1289 | } |
1332 | 1290 | ||
1333 | sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | 1291 | sdma->clk = clk_get(&pdev->dev, NULL); |
1334 | if (IS_ERR(sdma->clk_ipg)) { | 1292 | if (IS_ERR(sdma->clk)) { |
1335 | ret = PTR_ERR(sdma->clk_ipg); | 1293 | ret = PTR_ERR(sdma->clk); |
1336 | goto err_clk; | 1294 | goto err_clk; |
1337 | } | 1295 | } |
1338 | 1296 | ||
1339 | sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); | ||
1340 | if (IS_ERR(sdma->clk_ahb)) { | ||
1341 | ret = PTR_ERR(sdma->clk_ahb); | ||
1342 | goto err_clk; | ||
1343 | } | ||
1344 | |||
1345 | clk_prepare(sdma->clk_ipg); | ||
1346 | clk_prepare(sdma->clk_ahb); | ||
1347 | |||
1348 | sdma->regs = ioremap(iores->start, resource_size(iores)); | 1297 | sdma->regs = ioremap(iores->start, resource_size(iores)); |
1349 | if (!sdma->regs) { | 1298 | if (!sdma->regs) { |
1350 | ret = -ENOMEM; | 1299 | ret = -ENOMEM; |
@@ -1361,11 +1310,6 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1361 | goto err_alloc; | 1310 | goto err_alloc; |
1362 | } | 1311 | } |
1363 | 1312 | ||
1364 | /* initially no scripts available */ | ||
1365 | saddr_arr = (s32 *)sdma->script_addrs; | ||
1366 | for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) | ||
1367 | saddr_arr[i] = -EINVAL; | ||
1368 | |||
1369 | if (of_id) | 1313 | if (of_id) |
1370 | pdev->id_entry = of_id->data; | 1314 | pdev->id_entry = of_id->data; |
1371 | sdma->devtype = pdev->id_entry->driver_data; | 1315 | sdma->devtype = pdev->id_entry->driver_data; |
@@ -1382,11 +1326,8 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1382 | spin_lock_init(&sdmac->lock); | 1326 | spin_lock_init(&sdmac->lock); |
1383 | 1327 | ||
1384 | sdmac->chan.device = &sdma->dma_device; | 1328 | sdmac->chan.device = &sdma->dma_device; |
1385 | dma_cookie_init(&sdmac->chan); | ||
1386 | sdmac->channel = i; | 1329 | sdmac->channel = i; |
1387 | 1330 | ||
1388 | tasklet_init(&sdmac->tasklet, sdma_tasklet, | ||
1389 | (unsigned long) sdmac); | ||
1390 | /* | 1331 | /* |
1391 | * Add the channel to the DMAC list. Do not add channel 0 though | 1332 | * Add the channel to the DMAC list. Do not add channel 0 though |
1392 | * because we need it internally in the SDMA driver. This also means | 1333 | * because we need it internally in the SDMA driver. This also means |
@@ -1405,9 +1346,7 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1405 | sdma_add_scripts(sdma, pdata->script_addrs); | 1346 | sdma_add_scripts(sdma, pdata->script_addrs); |
1406 | 1347 | ||
1407 | if (pdata) { | 1348 | if (pdata) { |
1408 | ret = sdma_get_firmware(sdma, pdata->fw_name); | 1349 | sdma_get_firmware(sdma, pdata->fw_name); |
1409 | if (ret) | ||
1410 | dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); | ||
1411 | } else { | 1350 | } else { |
1412 | /* | 1351 | /* |
1413 | * Because that device tree does not encode ROM script address, | 1352 | * Because that device tree does not encode ROM script address, |
@@ -1416,12 +1355,15 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1416 | */ | 1355 | */ |
1417 | ret = of_property_read_string(np, "fsl,sdma-ram-script-name", | 1356 | ret = of_property_read_string(np, "fsl,sdma-ram-script-name", |
1418 | &fw_name); | 1357 | &fw_name); |
1419 | if (ret) | 1358 | if (ret) { |
1420 | dev_warn(&pdev->dev, "failed to get firmware name\n"); | 1359 | dev_err(&pdev->dev, "failed to get firmware name\n"); |
1421 | else { | 1360 | goto err_init; |
1422 | ret = sdma_get_firmware(sdma, fw_name); | 1361 | } |
1423 | if (ret) | 1362 | |
1424 | dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); | 1363 | ret = sdma_get_firmware(sdma, fw_name); |
1364 | if (ret) { | ||
1365 | dev_err(&pdev->dev, "failed to get firmware\n"); | ||
1366 | goto err_init; | ||
1425 | } | 1367 | } |
1426 | } | 1368 | } |
1427 | 1369 | ||
@@ -1454,6 +1396,7 @@ err_alloc: | |||
1454 | err_request_irq: | 1396 | err_request_irq: |
1455 | iounmap(sdma->regs); | 1397 | iounmap(sdma->regs); |
1456 | err_ioremap: | 1398 | err_ioremap: |
1399 | clk_put(sdma->clk); | ||
1457 | err_clk: | 1400 | err_clk: |
1458 | release_mem_region(iores->start, resource_size(iores)); | 1401 | release_mem_region(iores->start, resource_size(iores)); |
1459 | err_request_region: | 1402 | err_request_region: |
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a0de82e21a7..8a3fdd87db9 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -27,9 +27,6 @@ | |||
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
29 | #include <linux/intel_mid_dma.h> | 29 | #include <linux/intel_mid_dma.h> |
30 | #include <linux/module.h> | ||
31 | |||
32 | #include "dmaengine.h" | ||
33 | 30 | ||
34 | #define MAX_CHAN 4 /*max ch across controllers*/ | 31 | #define MAX_CHAN 4 /*max ch across controllers*/ |
35 | #include "intel_mid_dma_regs.h" | 32 | #include "intel_mid_dma_regs.h" |
@@ -118,15 +115,16 @@ DMAC1 interrupt Functions*/ | |||
118 | 115 | ||
119 | /** | 116 | /** |
120 | * dmac1_mask_periphral_intr - mask the periphral interrupt | 117 | * dmac1_mask_periphral_intr - mask the periphral interrupt |
121 | * @mid: dma device for which masking is required | 118 | * @midc: dma channel for which masking is required |
122 | * | 119 | * |
123 | * Masks the DMA periphral interrupt | 120 | * Masks the DMA periphral interrupt |
124 | * this is valid for DMAC1 family controllers only | 121 | * this is valid for DMAC1 family controllers only |
125 | * This controller should have periphral mask registers already mapped | 122 | * This controller should have periphral mask registers already mapped |
126 | */ | 123 | */ |
127 | static void dmac1_mask_periphral_intr(struct middma_device *mid) | 124 | static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) |
128 | { | 125 | { |
129 | u32 pimr; | 126 | u32 pimr; |
127 | struct middma_device *mid = to_middma_device(midc->chan.device); | ||
130 | 128 | ||
131 | if (mid->pimr_mask) { | 129 | if (mid->pimr_mask) { |
132 | pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); | 130 | pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); |
@@ -186,6 +184,7 @@ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) | |||
186 | static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) | 184 | static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) |
187 | { | 185 | { |
188 | /*Check LPE PISR, make sure fwd is disabled*/ | 186 | /*Check LPE PISR, make sure fwd is disabled*/ |
187 | dmac1_mask_periphral_intr(midc); | ||
189 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); | 188 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); |
190 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); | 189 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); |
191 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); | 190 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); |
@@ -282,15 +281,14 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
282 | * callbacks but must be called with the lock held. | 281 | * callbacks but must be called with the lock held. |
283 | */ | 282 | */ |
284 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | 283 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, |
285 | struct intel_mid_dma_desc *desc) | 284 | struct intel_mid_dma_desc *desc) |
286 | __releases(&midc->lock) __acquires(&midc->lock) | ||
287 | { | 285 | { |
288 | struct dma_async_tx_descriptor *txd = &desc->txd; | 286 | struct dma_async_tx_descriptor *txd = &desc->txd; |
289 | dma_async_tx_callback callback_txd = NULL; | 287 | dma_async_tx_callback callback_txd = NULL; |
290 | struct intel_mid_dma_lli *llitem; | 288 | struct intel_mid_dma_lli *llitem; |
291 | void *param_txd = NULL; | 289 | void *param_txd = NULL; |
292 | 290 | ||
293 | dma_cookie_complete(txd); | 291 | midc->completed = txd->cookie; |
294 | callback_txd = txd->callback; | 292 | callback_txd = txd->callback; |
295 | param_txd = txd->callback_param; | 293 | param_txd = txd->callback_param; |
296 | 294 | ||
@@ -314,7 +312,6 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
314 | pci_pool_free(desc->lli_pool, desc->lli, | 312 | pci_pool_free(desc->lli_pool, desc->lli, |
315 | desc->lli_phys); | 313 | desc->lli_phys); |
316 | pci_pool_destroy(desc->lli_pool); | 314 | pci_pool_destroy(desc->lli_pool); |
317 | desc->lli = NULL; | ||
318 | } | 315 | } |
319 | list_move(&desc->desc_node, &midc->free_list); | 316 | list_move(&desc->desc_node, &midc->free_list); |
320 | midc->busy = false; | 317 | midc->busy = false; |
@@ -394,15 +391,15 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | |||
394 | } | 391 | } |
395 | } | 392 | } |
396 | /*Populate CTL_HI values*/ | 393 | /*Populate CTL_HI values*/ |
397 | ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), | 394 | ctl_hi.ctlx.block_ts = get_block_ts(sg->length, |
398 | desc->width, | 395 | desc->width, |
399 | midc->dma->block_size); | 396 | midc->dma->block_size); |
400 | /*Populate SAR and DAR values*/ | 397 | /*Populate SAR and DAR values*/ |
401 | sg_phy_addr = sg_dma_address(sg); | 398 | sg_phy_addr = sg_phys(sg); |
402 | if (desc->dirn == DMA_MEM_TO_DEV) { | 399 | if (desc->dirn == DMA_TO_DEVICE) { |
403 | lli_bloc_desc->sar = sg_phy_addr; | 400 | lli_bloc_desc->sar = sg_phy_addr; |
404 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | 401 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; |
405 | } else if (desc->dirn == DMA_DEV_TO_MEM) { | 402 | } else if (desc->dirn == DMA_FROM_DEVICE) { |
406 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | 403 | lli_bloc_desc->sar = mids->dma_slave.src_addr; |
407 | lli_bloc_desc->dar = sg_phy_addr; | 404 | lli_bloc_desc->dar = sg_phy_addr; |
408 | } | 405 | } |
@@ -427,7 +424,7 @@ DMA engine callback Functions*/ | |||
427 | * intel_mid_dma_tx_submit - callback to submit DMA transaction | 424 | * intel_mid_dma_tx_submit - callback to submit DMA transaction |
428 | * @tx: dma engine descriptor | 425 | * @tx: dma engine descriptor |
429 | * | 426 | * |
430 | * Submit the DMA transaction for this descriptor, start if ch idle | 427 | * Submit the DMA trasaction for this descriptor, start if ch idle |
431 | */ | 428 | */ |
432 | static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 429 | static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
433 | { | 430 | { |
@@ -436,7 +433,14 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
436 | dma_cookie_t cookie; | 433 | dma_cookie_t cookie; |
437 | 434 | ||
438 | spin_lock_bh(&midc->lock); | 435 | spin_lock_bh(&midc->lock); |
439 | cookie = dma_cookie_assign(tx); | 436 | cookie = midc->chan.cookie; |
437 | |||
438 | if (++cookie < 0) | ||
439 | cookie = 1; | ||
440 | |||
441 | midc->chan.cookie = cookie; | ||
442 | desc->txd.cookie = cookie; | ||
443 | |||
440 | 444 | ||
441 | if (list_empty(&midc->active_list)) | 445 | if (list_empty(&midc->active_list)) |
442 | list_add_tail(&desc->desc_node, &midc->active_list); | 446 | list_add_tail(&desc->desc_node, &midc->active_list); |
@@ -477,18 +481,29 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
477 | dma_cookie_t cookie, | 481 | dma_cookie_t cookie, |
478 | struct dma_tx_state *txstate) | 482 | struct dma_tx_state *txstate) |
479 | { | 483 | { |
480 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 484 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
481 | enum dma_status ret; | 485 | dma_cookie_t last_used; |
486 | dma_cookie_t last_complete; | ||
487 | int ret; | ||
488 | |||
489 | last_complete = midc->completed; | ||
490 | last_used = chan->cookie; | ||
482 | 491 | ||
483 | ret = dma_cookie_status(chan, cookie, txstate); | 492 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
484 | if (ret != DMA_SUCCESS) { | 493 | if (ret != DMA_SUCCESS) { |
485 | spin_lock_bh(&midc->lock); | ||
486 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 494 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
487 | spin_unlock_bh(&midc->lock); | ||
488 | 495 | ||
489 | ret = dma_cookie_status(chan, cookie, txstate); | 496 | last_complete = midc->completed; |
497 | last_used = chan->cookie; | ||
498 | |||
499 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
490 | } | 500 | } |
491 | 501 | ||
502 | if (txstate) { | ||
503 | txstate->last = last_complete; | ||
504 | txstate->used = last_used; | ||
505 | txstate->residue = 0; | ||
506 | } | ||
492 | return ret; | 507 | return ret; |
493 | } | 508 | } |
494 | 509 | ||
@@ -552,7 +567,6 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, | |||
552 | pci_pool_free(desc->lli_pool, desc->lli, | 567 | pci_pool_free(desc->lli_pool, desc->lli, |
553 | desc->lli_phys); | 568 | desc->lli_phys); |
554 | pci_pool_destroy(desc->lli_pool); | 569 | pci_pool_destroy(desc->lli_pool); |
555 | desc->lli = NULL; | ||
556 | } | 570 | } |
557 | list_move(&desc->desc_node, &midc->free_list); | 571 | list_move(&desc->desc_node, &midc->free_list); |
558 | } | 572 | } |
@@ -619,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
619 | if (midc->dma->pimr_mask) { | 633 | if (midc->dma->pimr_mask) { |
620 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | 634 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ |
621 | cfg_hi.cfgx.fifo_mode = 1; | 635 | cfg_hi.cfgx.fifo_mode = 1; |
622 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { | 636 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { |
623 | cfg_hi.cfgx.src_per = 0; | 637 | cfg_hi.cfgx.src_per = 0; |
624 | if (mids->device_instance == 0) | 638 | if (mids->device_instance == 0) |
625 | cfg_hi.cfgx.dst_per = 3; | 639 | cfg_hi.cfgx.dst_per = 3; |
626 | if (mids->device_instance == 1) | 640 | if (mids->device_instance == 1) |
627 | cfg_hi.cfgx.dst_per = 1; | 641 | cfg_hi.cfgx.dst_per = 1; |
628 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { | 642 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { |
629 | if (mids->device_instance == 0) | 643 | if (mids->device_instance == 0) |
630 | cfg_hi.cfgx.src_per = 2; | 644 | cfg_hi.cfgx.src_per = 2; |
631 | if (mids->device_instance == 1) | 645 | if (mids->device_instance == 1) |
@@ -669,11 +683,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
669 | ctl_lo.ctlx.sinc = 0; | 683 | ctl_lo.ctlx.sinc = 0; |
670 | ctl_lo.ctlx.dinc = 0; | 684 | ctl_lo.ctlx.dinc = 0; |
671 | } else { | 685 | } else { |
672 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { | 686 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { |
673 | ctl_lo.ctlx.sinc = 0; | 687 | ctl_lo.ctlx.sinc = 0; |
674 | ctl_lo.ctlx.dinc = 2; | 688 | ctl_lo.ctlx.dinc = 2; |
675 | ctl_lo.ctlx.tt_fc = 1; | 689 | ctl_lo.ctlx.tt_fc = 1; |
676 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { | 690 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { |
677 | ctl_lo.ctlx.sinc = 2; | 691 | ctl_lo.ctlx.sinc = 2; |
678 | ctl_lo.ctlx.dinc = 0; | 692 | ctl_lo.ctlx.dinc = 0; |
679 | ctl_lo.ctlx.tt_fc = 2; | 693 | ctl_lo.ctlx.tt_fc = 2; |
@@ -714,14 +728,13 @@ err_desc_get: | |||
714 | * @sg_len: length of sg txn | 728 | * @sg_len: length of sg txn |
715 | * @direction: DMA transfer dirtn | 729 | * @direction: DMA transfer dirtn |
716 | * @flags: DMA flags | 730 | * @flags: DMA flags |
717 | * @context: transfer context (ignored) | ||
718 | * | 731 | * |
719 | * Prepares LLI based periphral transfer | 732 | * Prepares LLI based periphral transfer |
720 | */ | 733 | */ |
721 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | 734 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( |
722 | struct dma_chan *chan, struct scatterlist *sgl, | 735 | struct dma_chan *chan, struct scatterlist *sgl, |
723 | unsigned int sg_len, enum dma_transfer_direction direction, | 736 | unsigned int sg_len, enum dma_data_direction direction, |
724 | unsigned long flags, void *context) | 737 | unsigned long flags) |
725 | { | 738 | { |
726 | struct intel_mid_dma_chan *midc = NULL; | 739 | struct intel_mid_dma_chan *midc = NULL; |
727 | struct intel_mid_dma_slave *mids = NULL; | 740 | struct intel_mid_dma_slave *mids = NULL; |
@@ -747,7 +760,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
747 | txd = intel_mid_dma_prep_memcpy(chan, | 760 | txd = intel_mid_dma_prep_memcpy(chan, |
748 | mids->dma_slave.dst_addr, | 761 | mids->dma_slave.dst_addr, |
749 | mids->dma_slave.src_addr, | 762 | mids->dma_slave.src_addr, |
750 | sg_dma_len(sgl), | 763 | sgl->length, |
751 | flags); | 764 | flags); |
752 | return txd; | 765 | return txd; |
753 | } else { | 766 | } else { |
@@ -759,7 +772,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
759 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | 772 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", |
760 | sg_len, direction, flags); | 773 | sg_len, direction, flags); |
761 | 774 | ||
762 | txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); | 775 | txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); |
763 | if (NULL == txd) { | 776 | if (NULL == txd) { |
764 | pr_err("MDMA: Prep memcpy failed\n"); | 777 | pr_err("MDMA: Prep memcpy failed\n"); |
765 | return NULL; | 778 | return NULL; |
@@ -815,6 +828,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
815 | /*trying to free ch in use!!!!!*/ | 828 | /*trying to free ch in use!!!!!*/ |
816 | pr_err("ERR_MDMA: trying to free ch in use\n"); | 829 | pr_err("ERR_MDMA: trying to free ch in use\n"); |
817 | } | 830 | } |
831 | pm_runtime_put(&mid->pdev->dev); | ||
818 | spin_lock_bh(&midc->lock); | 832 | spin_lock_bh(&midc->lock); |
819 | midc->descs_allocated = 0; | 833 | midc->descs_allocated = 0; |
820 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 834 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
@@ -835,7 +849,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
835 | /* Disable CH interrupts */ | 849 | /* Disable CH interrupts */ |
836 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | 850 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); |
837 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | 851 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); |
838 | pm_runtime_put(&mid->pdev->dev); | ||
839 | } | 852 | } |
840 | 853 | ||
841 | /** | 854 | /** |
@@ -856,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
856 | pm_runtime_get_sync(&mid->pdev->dev); | 869 | pm_runtime_get_sync(&mid->pdev->dev); |
857 | 870 | ||
858 | if (mid->state == SUSPENDED) { | 871 | if (mid->state == SUSPENDED) { |
859 | if (dma_resume(&mid->pdev->dev)) { | 872 | if (dma_resume(mid->pdev)) { |
860 | pr_err("ERR_MDMA: resume failed"); | 873 | pr_err("ERR_MDMA: resume failed"); |
861 | return -EFAULT; | 874 | return -EFAULT; |
862 | } | 875 | } |
@@ -869,7 +882,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
869 | pm_runtime_put(&mid->pdev->dev); | 882 | pm_runtime_put(&mid->pdev->dev); |
870 | return -EIO; | 883 | return -EIO; |
871 | } | 884 | } |
872 | dma_cookie_init(chan); | 885 | midc->completed = chan->cookie = 1; |
873 | 886 | ||
874 | spin_lock_bh(&midc->lock); | 887 | spin_lock_bh(&midc->lock); |
875 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { | 888 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { |
@@ -1039,8 +1052,7 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
1039 | } | 1052 | } |
1040 | err_status &= mid->intr_mask; | 1053 | err_status &= mid->intr_mask; |
1041 | if (err_status) { | 1054 | if (err_status) { |
1042 | iowrite32((err_status << INT_MASK_WE), | 1055 | iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); |
1043 | mid->dma_base + MASK_ERR); | ||
1044 | call_tasklet = 1; | 1056 | call_tasklet = 1; |
1045 | } | 1057 | } |
1046 | if (call_tasklet) | 1058 | if (call_tasklet) |
@@ -1088,8 +1100,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1088 | LNW_PERIPHRAL_MASK_SIZE); | 1100 | LNW_PERIPHRAL_MASK_SIZE); |
1089 | if (dma->mask_reg == NULL) { | 1101 | if (dma->mask_reg == NULL) { |
1090 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); | 1102 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); |
1091 | err = -ENOMEM; | 1103 | return -ENOMEM; |
1092 | goto err_ioremap; | ||
1093 | } | 1104 | } |
1094 | } else | 1105 | } else |
1095 | dma->mask_reg = NULL; | 1106 | dma->mask_reg = NULL; |
@@ -1102,7 +1113,8 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1102 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | 1113 | struct intel_mid_dma_chan *midch = &dma->ch[i]; |
1103 | 1114 | ||
1104 | midch->chan.device = &dma->common; | 1115 | midch->chan.device = &dma->common; |
1105 | dma_cookie_init(&midch->chan); | 1116 | midch->chan.cookie = 1; |
1117 | midch->chan.chan_id = i; | ||
1106 | midch->ch_id = dma->chan_base + i; | 1118 | midch->ch_id = dma->chan_base + i; |
1107 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); | 1119 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); |
1108 | 1120 | ||
@@ -1138,6 +1150,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1138 | dma_cap_set(DMA_SLAVE, dma->common.cap_mask); | 1150 | dma_cap_set(DMA_SLAVE, dma->common.cap_mask); |
1139 | dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); | 1151 | dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); |
1140 | dma->common.dev = &pdev->dev; | 1152 | dma->common.dev = &pdev->dev; |
1153 | dma->common.chancnt = dma->max_chan; | ||
1141 | 1154 | ||
1142 | dma->common.device_alloc_chan_resources = | 1155 | dma->common.device_alloc_chan_resources = |
1143 | intel_mid_dma_alloc_chan_resources; | 1156 | intel_mid_dma_alloc_chan_resources; |
@@ -1186,9 +1199,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1186 | err_engine: | 1199 | err_engine: |
1187 | free_irq(pdev->irq, dma); | 1200 | free_irq(pdev->irq, dma); |
1188 | err_irq: | 1201 | err_irq: |
1189 | if (dma->mask_reg) | ||
1190 | iounmap(dma->mask_reg); | ||
1191 | err_ioremap: | ||
1192 | pci_pool_destroy(dma->dma_pool); | 1202 | pci_pool_destroy(dma->dma_pool); |
1193 | err_dma_pool: | 1203 | err_dma_pool: |
1194 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | 1204 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); |
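Editor's note on the mid_setup_dma() error path above: it follows the usual goto-unwind convention, where each failing step jumps to a label that frees only what earlier steps already set up (err_engine, err_irq, err_ioremap, err_dma_pool). A small self-contained illustration of that convention, using plain malloc()/free() as stand-ins for the driver's pool, ioremap and IRQ resources:

#include <stdio.h>
#include <stdlib.h>

/*
 * Each setup step either succeeds or jumps to a label that undoes only what
 * was already acquired.  On success the resources stay owned by the device
 * and are released later by the matching teardown path.
 */
static int setup(void)
{
    void *pool, *regs;
    int err;

    pool = malloc(64);    /* stands in for pci_pool_create() */
    if (!pool) {
        err = -1;
        goto err_pool;
    }

    regs = malloc(64);    /* stands in for ioremap() */
    if (!regs) {
        err = -1;
        goto err_ioremap;
    }

    /* ...request the IRQ, register the engines; failures would unwind here... */
    return 0;

err_ioremap:
    free(pool);           /* undo step one only */
err_pool:
    fprintf(stderr, "setup failed: %d\n", err);
    return err;
}

int main(void)
{
    return setup() ? 1 : 0;
}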
@@ -1225,7 +1235,7 @@ static void middma_shutdown(struct pci_dev *pdev) | |||
1225 | * Initialize the PCI device, map BARs, query driver data. | 1235 | * Initialize the PCI device, map BARs, query driver data. |
1226 | * Call setup_dma to complete contoller and chan initilzation | 1236 | * Call setup_dma to complete contoller and chan initilzation |
1227 | */ | 1237 | */ |
1228 | static int intel_mid_dma_probe(struct pci_dev *pdev, | 1238 | static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, |
1229 | const struct pci_device_id *id) | 1239 | const struct pci_device_id *id) |
1230 | { | 1240 | { |
1231 | struct middma_device *device; | 1241 | struct middma_device *device; |
@@ -1308,7 +1318,7 @@ err_enable_device: | |||
1308 | * Free up all resources and data | 1318 | * Free up all resources and data |
1309 | * Call shutdown_dma to complete contoller and chan cleanup | 1319 | * Call shutdown_dma to complete contoller and chan cleanup |
1310 | */ | 1320 | */ |
1311 | static void intel_mid_dma_remove(struct pci_dev *pdev) | 1321 | static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) |
1312 | { | 1322 | { |
1313 | struct middma_device *device = pci_get_drvdata(pdev); | 1323 | struct middma_device *device = pci_get_drvdata(pdev); |
1314 | 1324 | ||
@@ -1330,9 +1340,8 @@ static void intel_mid_dma_remove(struct pci_dev *pdev) | |||
1330 | * | 1340 | * |
1331 | * This function is called by OS when a power event occurs | 1341 | * This function is called by OS when a power event occurs |
1332 | */ | 1342 | */ |
1333 | static int dma_suspend(struct device *dev) | 1343 | int dma_suspend(struct pci_dev *pci, pm_message_t state) |
1334 | { | 1344 | { |
1335 | struct pci_dev *pci = to_pci_dev(dev); | ||
1336 | int i; | 1345 | int i; |
1337 | struct middma_device *device = pci_get_drvdata(pci); | 1346 | struct middma_device *device = pci_get_drvdata(pci); |
1338 | pr_debug("MDMA: dma_suspend called\n"); | 1347 | pr_debug("MDMA: dma_suspend called\n"); |
@@ -1341,7 +1350,6 @@ static int dma_suspend(struct device *dev) | |||
1341 | if (device->ch[i].in_use) | 1350 | if (device->ch[i].in_use) |
1342 | return -EAGAIN; | 1351 | return -EAGAIN; |
1343 | } | 1352 | } |
1344 | dmac1_mask_periphral_intr(device); | ||
1345 | device->state = SUSPENDED; | 1353 | device->state = SUSPENDED; |
1346 | pci_save_state(pci); | 1354 | pci_save_state(pci); |
1347 | pci_disable_device(pci); | 1355 | pci_disable_device(pci); |
@@ -1356,9 +1364,8 @@ static int dma_suspend(struct device *dev) | |||
1356 | * | 1364 | * |
1357 | * This function is called by OS when a power event occurs | 1365 | * This function is called by OS when a power event occurs |
1358 | */ | 1366 | */ |
1359 | int dma_resume(struct device *dev) | 1367 | int dma_resume(struct pci_dev *pci) |
1360 | { | 1368 | { |
1361 | struct pci_dev *pci = to_pci_dev(dev); | ||
1362 | int ret; | 1369 | int ret; |
1363 | struct middma_device *device = pci_get_drvdata(pci); | 1370 | struct middma_device *device = pci_get_drvdata(pci); |
1364 | 1371 | ||
@@ -1424,16 +1431,16 @@ static const struct dev_pm_ops intel_mid_dma_pm = { | |||
1424 | .runtime_suspend = dma_runtime_suspend, | 1431 | .runtime_suspend = dma_runtime_suspend, |
1425 | .runtime_resume = dma_runtime_resume, | 1432 | .runtime_resume = dma_runtime_resume, |
1426 | .runtime_idle = dma_runtime_idle, | 1433 | .runtime_idle = dma_runtime_idle, |
1427 | .suspend = dma_suspend, | ||
1428 | .resume = dma_resume, | ||
1429 | }; | 1434 | }; |
1430 | 1435 | ||
1431 | static struct pci_driver intel_mid_dma_pci_driver = { | 1436 | static struct pci_driver intel_mid_dma_pci_driver = { |
1432 | .name = "Intel MID DMA", | 1437 | .name = "Intel MID DMA", |
1433 | .id_table = intel_mid_dma_ids, | 1438 | .id_table = intel_mid_dma_ids, |
1434 | .probe = intel_mid_dma_probe, | 1439 | .probe = intel_mid_dma_probe, |
1435 | .remove = intel_mid_dma_remove, | 1440 | .remove = __devexit_p(intel_mid_dma_remove), |
1436 | #ifdef CONFIG_PM | 1441 | #ifdef CONFIG_PM |
1442 | .suspend = dma_suspend, | ||
1443 | .resume = dma_resume, | ||
1437 | .driver = { | 1444 | .driver = { |
1438 | .pm = &intel_mid_dma_pm, | 1445 | .pm = &intel_mid_dma_pm, |
1439 | }, | 1446 | }, |
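Net effect of the power-management hunks above: suspend/resume move out of dev_pm_ops, where they take a struct device *, and back into the legacy pci_driver callbacks, which receive the pci_dev directly; the revert also drops the dmac1_mask_periphral_intr() call from the suspend path. A minimal sketch of the two registration styles using the names from this file; the dev_pm_ops variant is what the newer code wired up:

/* Legacy style restored by the revert: the PCI core calls these directly. */
static struct pci_driver intel_mid_dma_pci_driver = {
        /* ...probe/remove/id_table as above... */
        .suspend = dma_suspend,   /* int (*)(struct pci_dev *, pm_message_t) */
        .resume  = dma_resume,    /* int (*)(struct pci_dev *) */
};

/* Newer style being reverted away from: dev_pm_ops takes struct device *,
 * and the callback recovers the pci_dev with to_pci_dev(dev). */
static const struct dev_pm_ops intel_mid_dma_pm = {
        .suspend = dma_suspend,   /* int (*)(struct device *) */
        .resume  = dma_resume,
};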
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index 17b42192ea5..aea5ee88ce0 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
@@ -165,12 +165,13 @@ union intel_mid_dma_cfg_hi { | |||
165 | * @dma_base: MMIO register space DMA engine base pointer | 165 | * @dma_base: MMIO register space DMA engine base pointer |
166 | * @ch_id: DMA channel id | 166 | * @ch_id: DMA channel id |
167 | * @lock: channel spinlock | 167 | * @lock: channel spinlock |
168 | * @completed: DMA cookie | ||
168 | * @active_list: current active descriptors | 169 | * @active_list: current active descriptors |
169 | * @queue: current queued up descriptors | 170 | * @queue: current queued up descriptors |
170 | * @free_list: current free descriptors | 171 | * @free_list: current free descriptors |
171 | * @slave: dma slave structure | 172 | * @slave: dma slave struture |
172 | * @descs_allocated: total number of descriptors allocated | 173 | * @descs_allocated: total number of decsiptors allocated |
173 | * @dma: dma device structure pointer | 174 | * @dma: dma device struture pointer |
174 | * @busy: bool representing if ch is busy (active txn) or not | 175 | * @busy: bool representing if ch is busy (active txn) or not |
175 | * @in_use: bool representing if ch is in use or not | 176 | * @in_use: bool representing if ch is in use or not |
176 | * @raw_tfr: raw trf interrupt received | 177 | * @raw_tfr: raw trf interrupt received |
@@ -182,6 +183,7 @@ struct intel_mid_dma_chan { | |||
182 | void __iomem *dma_base; | 183 | void __iomem *dma_base; |
183 | int ch_id; | 184 | int ch_id; |
184 | spinlock_t lock; | 185 | spinlock_t lock; |
186 | dma_cookie_t completed; | ||
185 | struct list_head active_list; | 187 | struct list_head active_list; |
186 | struct list_head queue; | 188 | struct list_head queue; |
187 | struct list_head free_list; | 189 | struct list_head free_list; |
@@ -260,7 +262,7 @@ struct intel_mid_dma_desc { | |||
260 | unsigned int lli_length; | 262 | unsigned int lli_length; |
261 | unsigned int current_lli; | 263 | unsigned int current_lli; |
262 | dma_addr_t next; | 264 | dma_addr_t next; |
263 | enum dma_transfer_direction dirn; | 265 | enum dma_data_direction dirn; |
264 | enum dma_status status; | 266 | enum dma_status status; |
265 | enum dma_slave_buswidth width; /*width of DMA txn*/ | 267 | enum dma_slave_buswidth width; /*width of DMA txn*/ |
266 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | 268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ |
@@ -294,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | |||
294 | } | 296 | } |
295 | 297 | ||
296 | 298 | ||
297 | int dma_resume(struct device *dev); | 299 | int dma_resume(struct pci_dev *pci); |
298 | 300 | ||
299 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | 301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ |
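This header revert moves cookie bookkeeping back into the driver (the per-channel completed field) instead of relying on the dmaengine core helpers, switches the descriptor direction back to enum dma_data_direction, and changes dma_resume() to take a pci_dev to match the legacy PM callbacks above. For illustration, a tx_status handler in this pre-helper style would look roughly like the sketch below; the function name is made up and to_intel_mid_dma_chan() is assumed to be the usual container_of cast helper in the .c file:

/* Illustrative only: completion reporting built from chan->cookie (last
 * issued) and the driver-private completed field restored above. */
static enum dma_status midc_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                      struct dma_tx_state *txstate)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
        dma_cookie_t last_used = chan->cookie;
        dma_cookie_t last_complete = midc->completed;

        dma_set_tx_state(txstate, last_complete, last_used, 0);
        return dma_async_is_complete(cookie, last_complete, last_used);
}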
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 9b041858d10..abd9038e06b 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c | |||
@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = { | |||
242 | }; | 242 | }; |
243 | 243 | ||
244 | 244 | ||
245 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 245 | struct dca_provider * __devinit |
246 | ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
246 | { | 247 | { |
247 | struct dca_provider *dca; | 248 | struct dca_provider *dca; |
248 | struct ioat_dca_priv *ioatdca; | 249 | struct ioat_dca_priv *ioatdca; |
@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) | |||
407 | return slots; | 408 | return slots; |
408 | } | 409 | } |
409 | 410 | ||
410 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 411 | struct dca_provider * __devinit |
412 | ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
411 | { | 413 | { |
412 | struct dca_provider *dca; | 414 | struct dca_provider *dca; |
413 | struct ioat_dca_priv *ioatdca; | 415 | struct ioat_dca_priv *ioatdca; |
@@ -602,24 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) | |||
602 | return slots; | 604 | return slots; |
603 | } | 605 | } |
604 | 606 | ||
605 | static inline int dca3_tag_map_invalid(u8 *tag_map) | 607 | struct dca_provider * __devinit |
606 | { | 608 | ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) |
607 | /* | ||
608 | * If the tag map is not programmed by the BIOS the default is: | ||
609 | * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00 | ||
610 | * | ||
611 | * This an invalid map and will result in only 2 possible tags | ||
612 | * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that | ||
613 | * this entire definition is invalid. | ||
614 | */ | ||
615 | return ((tag_map[0] == DCA_TAG_MAP_VALID) && | ||
616 | (tag_map[1] == DCA_TAG_MAP_VALID) && | ||
617 | (tag_map[2] == DCA_TAG_MAP_VALID) && | ||
618 | (tag_map[3] == DCA_TAG_MAP_VALID) && | ||
619 | (tag_map[4] == DCA_TAG_MAP_VALID)); | ||
620 | } | ||
621 | |||
622 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
623 | { | 609 | { |
624 | struct dca_provider *dca; | 610 | struct dca_provider *dca; |
625 | struct ioat_dca_priv *ioatdca; | 611 | struct ioat_dca_priv *ioatdca; |
@@ -688,12 +674,6 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | |||
688 | ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; | 674 | ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; |
689 | } | 675 | } |
690 | 676 | ||
691 | if (dca3_tag_map_invalid(ioatdca->tag_map)) { | ||
692 | dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n"); | ||
693 | free_dca_provider(dca); | ||
694 | return NULL; | ||
695 | } | ||
696 | |||
697 | err = register_dca_provider(dca, &pdev->dev); | 677 | err = register_dca_provider(dca, &pdev->dev); |
698 | if (err) { | 678 | if (err) { |
699 | free_dca_provider(dca); | 679 | free_dca_provider(dca); |
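Worth noting on the ioat/dca.c side: besides restoring the __devinit markings on the three *_dca_init() routines, the revert drops the dca3_tag_map_invalid() sanity check quoted above. Per the removed comment, an unprogrammed BIOS tag map (0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00) can only yield the tags 0x1F and the invalid 0x00; with the check gone such a map is no longer rejected, so DCA stays registered on those systems instead of being disabled with a warning.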
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a68a8ba87e..a4d6cb0c034 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -40,8 +40,6 @@ | |||
40 | #include "registers.h" | 40 | #include "registers.h" |
41 | #include "hw.h" | 41 | #include "hw.h" |
42 | 42 | ||
43 | #include "../dmaengine.h" | ||
44 | |||
45 | int ioat_pending_level = 4; | 43 | int ioat_pending_level = 4; |
46 | module_param(ioat_pending_level, int, 0644); | 44 | module_param(ioat_pending_level, int, 0644); |
47 | MODULE_PARM_DESC(ioat_pending_level, | 45 | MODULE_PARM_DESC(ioat_pending_level, |
@@ -109,7 +107,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c | |||
109 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | 107 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); |
110 | spin_lock_init(&chan->cleanup_lock); | 108 | spin_lock_init(&chan->cleanup_lock); |
111 | chan->common.device = dma; | 109 | chan->common.device = dma; |
112 | dma_cookie_init(&chan->common); | ||
113 | list_add_tail(&chan->common.device_node, &dma->channels); | 110 | list_add_tail(&chan->common.device_node, &dma->channels); |
114 | device->idx[idx] = chan; | 111 | device->idx[idx] = chan; |
115 | init_timer(&chan->timer); | 112 | init_timer(&chan->timer); |
@@ -238,7 +235,12 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
238 | 235 | ||
239 | spin_lock_bh(&ioat->desc_lock); | 236 | spin_lock_bh(&ioat->desc_lock); |
240 | /* cookie incr and addition to used_list must be atomic */ | 237 | /* cookie incr and addition to used_list must be atomic */ |
241 | cookie = dma_cookie_assign(tx); | 238 | cookie = c->cookie; |
239 | cookie++; | ||
240 | if (cookie < 0) | ||
241 | cookie = 1; | ||
242 | c->cookie = cookie; | ||
243 | tx->cookie = cookie; | ||
242 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 244 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
243 | 245 | ||
244 | /* write address into NextDescriptor field of last desc in chain */ | 246 | /* write address into NextDescriptor field of last desc in chain */ |
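The hunk above open-codes what the dmaengine core wraps as dma_cookie_assign(): bump the channel's last-issued cookie, wrap back to 1 if the signed value overflows (zero and negative cookies are reserved for error reporting), and stamp the result on the descriptor. A sketch of that pattern; the helper name here is illustrative, and callers hold the channel lock so the increment is effectively atomic:

/* What dma_cookie_assign(tx) does, per the open-coded version restored here. */
static dma_cookie_t assign_cookie(struct dma_chan *chan,
                                  struct dma_async_tx_descriptor *tx)
{
        dma_cookie_t cookie = chan->cookie + 1;

        if (cookie < 0)         /* wrapped past INT_MAX */
                cookie = 1;
        chan->cookie = cookie;
        tx->cookie = cookie;
        return cookie;
}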
@@ -546,9 +548,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | |||
546 | PCI_DMA_TODEVICE, flags, 0); | 548 | PCI_DMA_TODEVICE, flags, 0); |
547 | } | 549 | } |
548 | 550 | ||
549 | dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) | 551 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) |
550 | { | 552 | { |
551 | dma_addr_t phys_complete; | 553 | unsigned long phys_complete; |
552 | u64 completion; | 554 | u64 completion; |
553 | 555 | ||
554 | completion = *chan->completion; | 556 | completion = *chan->completion; |
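These hunks (and the matching ones in dma.h, dma_v2.c and dma_v3.c below) narrow phys_complete from dma_addr_t back to unsigned long. The value is a bus address extracted from the 64-bit channel status word, so on configurations where dma_addr_t is wider than unsigned long (typically 32-bit kernels with 64-bit DMA addressing) the older type can silently drop the high bits; that is presumably why the newer code had moved to dma_addr_t. A hypothetical illustration of the narrowing, with the low status bits masked off as the driver does via a registers.h macro:

/* Hypothetical values, for illustration only. */
u64 completion = 0x1ffff0000ULL;                    /* descriptor above 4 GiB */
unsigned long narrow = (unsigned long)(completion & ~0x3fULL); /* truncates on 32-bit */
dma_addr_t    wide   = completion & ~0x3fULL;       /* keeps the high bits when dma_addr_t is 64-bit */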
@@ -569,7 +571,7 @@ dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) | |||
569 | } | 571 | } |
570 | 572 | ||
571 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | 573 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, |
572 | dma_addr_t *phys_complete) | 574 | unsigned long *phys_complete) |
573 | { | 575 | { |
574 | *phys_complete = ioat_get_current_completion(chan); | 576 | *phys_complete = ioat_get_current_completion(chan); |
575 | if (*phys_complete == chan->last_completion) | 577 | if (*phys_complete == chan->last_completion) |
@@ -580,14 +582,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | |||
580 | return true; | 582 | return true; |
581 | } | 583 | } |
582 | 584 | ||
583 | static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) | 585 | static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) |
584 | { | 586 | { |
585 | struct ioat_chan_common *chan = &ioat->base; | 587 | struct ioat_chan_common *chan = &ioat->base; |
586 | struct list_head *_desc, *n; | 588 | struct list_head *_desc, *n; |
587 | struct dma_async_tx_descriptor *tx; | 589 | struct dma_async_tx_descriptor *tx; |
588 | 590 | ||
589 | dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n", | 591 | dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", |
590 | __func__, (unsigned long long) phys_complete); | 592 | __func__, phys_complete); |
591 | list_for_each_safe(_desc, n, &ioat->used_desc) { | 593 | list_for_each_safe(_desc, n, &ioat->used_desc) { |
592 | struct ioat_desc_sw *desc; | 594 | struct ioat_desc_sw *desc; |
593 | 595 | ||
@@ -601,7 +603,8 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) | |||
601 | */ | 603 | */ |
602 | dump_desc_dbg(ioat, desc); | 604 | dump_desc_dbg(ioat, desc); |
603 | if (tx->cookie) { | 605 | if (tx->cookie) { |
604 | dma_cookie_complete(tx); | 606 | chan->completed_cookie = tx->cookie; |
607 | tx->cookie = 0; | ||
605 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 608 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); |
606 | ioat->active -= desc->hw->tx_cnt; | 609 | ioat->active -= desc->hw->tx_cnt; |
607 | if (tx->callback) { | 610 | if (tx->callback) { |
@@ -652,7 +655,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) | |||
652 | static void ioat1_cleanup(struct ioat_dma_chan *ioat) | 655 | static void ioat1_cleanup(struct ioat_dma_chan *ioat) |
653 | { | 656 | { |
654 | struct ioat_chan_common *chan = &ioat->base; | 657 | struct ioat_chan_common *chan = &ioat->base; |
655 | dma_addr_t phys_complete; | 658 | unsigned long phys_complete; |
656 | 659 | ||
657 | prefetch(chan->completion); | 660 | prefetch(chan->completion); |
658 | 661 | ||
@@ -698,7 +701,7 @@ static void ioat1_timer_event(unsigned long data) | |||
698 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | 701 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); |
699 | spin_unlock_bh(&ioat->desc_lock); | 702 | spin_unlock_bh(&ioat->desc_lock); |
700 | } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | 703 | } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { |
701 | dma_addr_t phys_complete; | 704 | unsigned long phys_complete; |
702 | 705 | ||
703 | spin_lock_bh(&ioat->desc_lock); | 706 | spin_lock_bh(&ioat->desc_lock); |
704 | /* if we haven't made progress and we have already | 707 | /* if we haven't made progress and we have already |
@@ -730,15 +733,13 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
730 | { | 733 | { |
731 | struct ioat_chan_common *chan = to_chan_common(c); | 734 | struct ioat_chan_common *chan = to_chan_common(c); |
732 | struct ioatdma_device *device = chan->device; | 735 | struct ioatdma_device *device = chan->device; |
733 | enum dma_status ret; | ||
734 | 736 | ||
735 | ret = dma_cookie_status(c, cookie, txstate); | 737 | if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) |
736 | if (ret == DMA_SUCCESS) | 738 | return DMA_SUCCESS; |
737 | return ret; | ||
738 | 739 | ||
739 | device->cleanup_fn((unsigned long) c); | 740 | device->cleanup_fn((unsigned long) c); |
740 | 741 | ||
741 | return dma_cookie_status(c, cookie, txstate); | 742 | return ioat_tx_status(c, cookie, txstate); |
742 | } | 743 | } |
743 | 744 | ||
744 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) | 745 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) |
@@ -782,7 +783,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) | |||
782 | */ | 783 | */ |
783 | #define IOAT_TEST_SIZE 2000 | 784 | #define IOAT_TEST_SIZE 2000 |
784 | 785 | ||
785 | static void ioat_dma_test_callback(void *dma_async_param) | 786 | static void __devinit ioat_dma_test_callback(void *dma_async_param) |
786 | { | 787 | { |
787 | struct completion *cmp = dma_async_param; | 788 | struct completion *cmp = dma_async_param; |
788 | 789 | ||
@@ -793,7 +794,7 @@ static void ioat_dma_test_callback(void *dma_async_param) | |||
793 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. | 794 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. |
794 | * @device: device to be tested | 795 | * @device: device to be tested |
795 | */ | 796 | */ |
796 | int ioat_dma_self_test(struct ioatdma_device *device) | 797 | int __devinit ioat_dma_self_test(struct ioatdma_device *device) |
797 | { | 798 | { |
798 | int i; | 799 | int i; |
799 | u8 *src; | 800 | u8 *src; |
@@ -994,7 +995,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) | |||
994 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | 995 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); |
995 | } | 996 | } |
996 | 997 | ||
997 | int ioat_probe(struct ioatdma_device *device) | 998 | int __devinit ioat_probe(struct ioatdma_device *device) |
998 | { | 999 | { |
999 | int err = -ENODEV; | 1000 | int err = -ENODEV; |
1000 | struct dma_device *dma = &device->common; | 1001 | struct dma_device *dma = &device->common; |
@@ -1049,7 +1050,7 @@ err_dma_pool: | |||
1049 | return err; | 1050 | return err; |
1050 | } | 1051 | } |
1051 | 1052 | ||
1052 | int ioat_register(struct ioatdma_device *device) | 1053 | int __devinit ioat_register(struct ioatdma_device *device) |
1053 | { | 1054 | { |
1054 | int err = dma_async_device_register(&device->common); | 1055 | int err = dma_async_device_register(&device->common); |
1055 | 1056 | ||
@@ -1183,7 +1184,7 @@ void ioat_kobject_del(struct ioatdma_device *device) | |||
1183 | } | 1184 | } |
1184 | } | 1185 | } |
1185 | 1186 | ||
1186 | int ioat1_dma_probe(struct ioatdma_device *device, int dca) | 1187 | int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) |
1187 | { | 1188 | { |
1188 | struct pci_dev *pdev = device->pdev; | 1189 | struct pci_dev *pdev = device->pdev; |
1189 | struct dma_device *dma; | 1190 | struct dma_device *dma; |
@@ -1216,7 +1217,7 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) | |||
1216 | return err; | 1217 | return err; |
1217 | } | 1218 | } |
1218 | 1219 | ||
1219 | void ioat_dma_remove(struct ioatdma_device *device) | 1220 | void __devexit ioat_dma_remove(struct ioatdma_device *device) |
1220 | { | 1221 | { |
1221 | struct dma_device *dma = &device->common; | 1222 | struct dma_device *dma = &device->common; |
1222 | 1223 | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 087935f1565..5216c8a92a2 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -88,8 +88,9 @@ struct ioatdma_device { | |||
88 | struct ioat_chan_common { | 88 | struct ioat_chan_common { |
89 | struct dma_chan common; | 89 | struct dma_chan common; |
90 | void __iomem *reg_base; | 90 | void __iomem *reg_base; |
91 | dma_addr_t last_completion; | 91 | unsigned long last_completion; |
92 | spinlock_t cleanup_lock; | 92 | spinlock_t cleanup_lock; |
93 | dma_cookie_t completed_cookie; | ||
93 | unsigned long state; | 94 | unsigned long state; |
94 | #define IOAT_COMPLETION_PENDING 0 | 95 | #define IOAT_COMPLETION_PENDING 0 |
95 | #define IOAT_COMPLETION_ACK 1 | 96 | #define IOAT_COMPLETION_ACK 1 |
@@ -142,6 +143,28 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) | |||
142 | return container_of(chan, struct ioat_dma_chan, base); | 143 | return container_of(chan, struct ioat_dma_chan, base); |
143 | } | 144 | } |
144 | 145 | ||
146 | /** | ||
147 | * ioat_tx_status - poll the status of an ioat transaction | ||
148 | * @c: channel handle | ||
149 | * @cookie: transaction identifier | ||
150 | * @txstate: if set, updated with the transaction state | ||
151 | */ | ||
152 | static inline enum dma_status | ||
153 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
154 | struct dma_tx_state *txstate) | ||
155 | { | ||
156 | struct ioat_chan_common *chan = to_chan_common(c); | ||
157 | dma_cookie_t last_used; | ||
158 | dma_cookie_t last_complete; | ||
159 | |||
160 | last_used = c->cookie; | ||
161 | last_complete = chan->completed_cookie; | ||
162 | |||
163 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
164 | |||
165 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
166 | } | ||
167 | |||
145 | /* wrapper around hardware descriptor format + additional software fields */ | 168 | /* wrapper around hardware descriptor format + additional software fields */ |
146 | 169 | ||
147 | /** | 170 | /** |
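The ioat_tx_status() inline re-added above is the driver-local ancestor of the core dma_cookie_status() used on the left-hand side (and called twice in the *_tx_status hunks of dma.c and dma_v3.c: once as a fast path, once again after running cleanup). The core helper does essentially the same computation against fields that live in struct dma_chan; an approximation of its logic, with an illustrative name:

/* Approximation of dma_cookie_status() from drivers/dma/dmaengine.h; it
 * reads chan->cookie and chan->completed_cookie maintained by the core. */
static inline enum dma_status cookie_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *state)
{
        dma_cookie_t used = chan->cookie;
        dma_cookie_t complete = chan->completed_cookie;

        dma_set_tx_state(state, complete, used, 0);
        return dma_async_is_complete(cookie, complete, used);
}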
@@ -303,13 +326,14 @@ static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, | |||
303 | pci_unmap_page(pdev, addr, len, direction); | 326 | pci_unmap_page(pdev, addr, len, direction); |
304 | } | 327 | } |
305 | 328 | ||
306 | int ioat_probe(struct ioatdma_device *device); | 329 | int __devinit ioat_probe(struct ioatdma_device *device); |
307 | int ioat_register(struct ioatdma_device *device); | 330 | int __devinit ioat_register(struct ioatdma_device *device); |
308 | int ioat1_dma_probe(struct ioatdma_device *dev, int dca); | 331 | int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); |
309 | int ioat_dma_self_test(struct ioatdma_device *device); | 332 | int __devinit ioat_dma_self_test(struct ioatdma_device *device); |
310 | void ioat_dma_remove(struct ioatdma_device *device); | 333 | void __devexit ioat_dma_remove(struct ioatdma_device *device); |
311 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 334 | struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, |
312 | dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan); | 335 | void __iomem *iobase); |
336 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); | ||
313 | void ioat_init_channel(struct ioatdma_device *device, | 337 | void ioat_init_channel(struct ioatdma_device *device, |
314 | struct ioat_chan_common *chan, int idx); | 338 | struct ioat_chan_common *chan, int idx); |
315 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 339 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
@@ -317,7 +341,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
317 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | 341 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, |
318 | size_t len, struct ioat_dma_descriptor *hw); | 342 | size_t len, struct ioat_dma_descriptor *hw); |
319 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | 343 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, |
320 | dma_addr_t *phys_complete); | 344 | unsigned long *phys_complete); |
321 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); | 345 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); |
322 | void ioat_kobject_del(struct ioatdma_device *device); | 346 | void ioat_kobject_del(struct ioatdma_device *device); |
323 | extern const struct sysfs_ops ioat_sysfs_ops; | 347 | extern const struct sysfs_ops ioat_sysfs_ops; |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 82d4e306c32..5d65f837797 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -41,8 +41,6 @@ | |||
41 | #include "registers.h" | 41 | #include "registers.h" |
42 | #include "hw.h" | 42 | #include "hw.h" |
43 | 43 | ||
44 | #include "../dmaengine.h" | ||
45 | |||
46 | int ioat_ring_alloc_order = 8; | 44 | int ioat_ring_alloc_order = 8; |
47 | module_param(ioat_ring_alloc_order, int, 0644); | 45 | module_param(ioat_ring_alloc_order, int, 0644); |
48 | MODULE_PARM_DESC(ioat_ring_alloc_order, | 46 | MODULE_PARM_DESC(ioat_ring_alloc_order, |
@@ -128,7 +126,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | |||
128 | spin_unlock_bh(&ioat->prep_lock); | 126 | spin_unlock_bh(&ioat->prep_lock); |
129 | } | 127 | } |
130 | 128 | ||
131 | static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | 129 | static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) |
132 | { | 130 | { |
133 | struct ioat_chan_common *chan = &ioat->base; | 131 | struct ioat_chan_common *chan = &ioat->base; |
134 | struct dma_async_tx_descriptor *tx; | 132 | struct dma_async_tx_descriptor *tx; |
@@ -149,7 +147,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
149 | dump_desc_dbg(ioat, desc); | 147 | dump_desc_dbg(ioat, desc); |
150 | if (tx->cookie) { | 148 | if (tx->cookie) { |
151 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 149 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); |
152 | dma_cookie_complete(tx); | 150 | chan->completed_cookie = tx->cookie; |
151 | tx->cookie = 0; | ||
153 | if (tx->callback) { | 152 | if (tx->callback) { |
154 | tx->callback(tx->callback_param); | 153 | tx->callback(tx->callback_param); |
155 | tx->callback = NULL; | 154 | tx->callback = NULL; |
@@ -179,7 +178,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
179 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat) | 178 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat) |
180 | { | 179 | { |
181 | struct ioat_chan_common *chan = &ioat->base; | 180 | struct ioat_chan_common *chan = &ioat->base; |
182 | dma_addr_t phys_complete; | 181 | unsigned long phys_complete; |
183 | 182 | ||
184 | spin_lock_bh(&chan->cleanup_lock); | 183 | spin_lock_bh(&chan->cleanup_lock); |
185 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 184 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
@@ -260,7 +259,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo) | |||
260 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | 259 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) |
261 | { | 260 | { |
262 | struct ioat_chan_common *chan = &ioat->base; | 261 | struct ioat_chan_common *chan = &ioat->base; |
263 | dma_addr_t phys_complete; | 262 | unsigned long phys_complete; |
264 | 263 | ||
265 | ioat2_quiesce(chan, 0); | 264 | ioat2_quiesce(chan, 0); |
266 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 265 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
@@ -275,7 +274,7 @@ void ioat2_timer_event(unsigned long data) | |||
275 | struct ioat_chan_common *chan = &ioat->base; | 274 | struct ioat_chan_common *chan = &ioat->base; |
276 | 275 | ||
277 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | 276 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { |
278 | dma_addr_t phys_complete; | 277 | unsigned long phys_complete; |
279 | u64 status; | 278 | u64 status; |
280 | 279 | ||
281 | status = ioat_chansts(chan); | 280 | status = ioat_chansts(chan); |
@@ -399,9 +398,13 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | |||
399 | struct dma_chan *c = tx->chan; | 398 | struct dma_chan *c = tx->chan; |
400 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 399 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
401 | struct ioat_chan_common *chan = &ioat->base; | 400 | struct ioat_chan_common *chan = &ioat->base; |
402 | dma_cookie_t cookie; | 401 | dma_cookie_t cookie = c->cookie; |
403 | 402 | ||
404 | cookie = dma_cookie_assign(tx); | 403 | cookie++; |
404 | if (cookie < 0) | ||
405 | cookie = 1; | ||
406 | tx->cookie = cookie; | ||
407 | c->cookie = cookie; | ||
405 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 408 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
406 | 409 | ||
407 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | 410 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) |
@@ -434,11 +437,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f | |||
434 | return NULL; | 437 | return NULL; |
435 | memset(hw, 0, sizeof(*hw)); | 438 | memset(hw, 0, sizeof(*hw)); |
436 | 439 | ||
437 | desc = kmem_cache_zalloc(ioat2_cache, flags); | 440 | desc = kmem_cache_alloc(ioat2_cache, flags); |
438 | if (!desc) { | 441 | if (!desc) { |
439 | pci_pool_free(dma->dma_pool, hw, phys); | 442 | pci_pool_free(dma->dma_pool, hw, phys); |
440 | return NULL; | 443 | return NULL; |
441 | } | 444 | } |
445 | memset(desc, 0, sizeof(*desc)); | ||
442 | 446 | ||
443 | dma_async_tx_descriptor_init(&desc->txd, chan); | 447 | dma_async_tx_descriptor_init(&desc->txd, chan); |
444 | desc->txd.tx_submit = ioat2_tx_submit_unlock; | 448 | desc->txd.tx_submit = ioat2_tx_submit_unlock; |
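A behavioural no-op in the hunk above: the revert swaps kmem_cache_zalloc() for kmem_cache_alloc() plus an explicit memset(). The two forms produce an identical zeroed descriptor; zalloc simply passes __GFP_ZERO so the allocator clears the object:

desc = kmem_cache_zalloc(ioat2_cache, flags);   /* newer code */

desc = kmem_cache_alloc(ioat2_cache, flags);    /* reverted code */
if (desc)
        memset(desc, 0, sizeof(*desc));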
@@ -571,9 +575,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order) | |||
571 | */ | 575 | */ |
572 | struct ioat_chan_common *chan = &ioat->base; | 576 | struct ioat_chan_common *chan = &ioat->base; |
573 | struct dma_chan *c = &chan->common; | 577 | struct dma_chan *c = &chan->common; |
574 | const u32 curr_size = ioat2_ring_size(ioat); | 578 | const u16 curr_size = ioat2_ring_size(ioat); |
575 | const u16 active = ioat2_ring_active(ioat); | 579 | const u16 active = ioat2_ring_active(ioat); |
576 | const u32 new_size = 1 << order; | 580 | const u16 new_size = 1 << order; |
577 | struct ioat_ring_ent **ring; | 581 | struct ioat_ring_ent **ring; |
578 | u16 i; | 582 | u16 i; |
579 | 583 | ||
@@ -862,7 +866,7 @@ struct kobj_type ioat2_ktype = { | |||
862 | .default_attrs = ioat2_attrs, | 866 | .default_attrs = ioat2_attrs, |
863 | }; | 867 | }; |
864 | 868 | ||
865 | int ioat2_dma_probe(struct ioatdma_device *device, int dca) | 869 | int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) |
866 | { | 870 | { |
867 | struct pci_dev *pdev = device->pdev; | 871 | struct pci_dev *pdev = device->pdev; |
868 | struct dma_device *dma; | 872 | struct dma_device *dma; |
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index e100f644e34..a2c413b2b8d 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) | |||
74 | return container_of(chan, struct ioat2_dma_chan, base); | 74 | return container_of(chan, struct ioat2_dma_chan, base); |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat) | 77 | static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat) |
78 | { | 78 | { |
79 | return 1 << ioat->alloc_order; | 79 | return 1 << ioat->alloc_order; |
80 | } | 80 | } |
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) | |||
91 | return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); | 91 | return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); |
92 | } | 92 | } |
93 | 93 | ||
94 | static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat) | 94 | static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) |
95 | { | 95 | { |
96 | return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); | 96 | return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); |
97 | } | 97 | } |
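The ring accounting in dma_v2.h goes back to u16 here, matching the reshape_ring() hunk in dma_v2.c above. Since ioat2_ring_size() returns 1 << alloc_order, that is only safe while the maximum order stays below 16; if IOAT_MAX_ORDER is allowed to reach 16, the shift overflows a u16 to 0 and the ring appears to have no space, which is presumably why the newer code widened size and space to u32. Quick illustration:

u16 size16 = (u16)(1 << 16);   /* truncates to 0: ring looks empty/full */
u32 size32 = 1 << 16;          /* 65536 as intended */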
@@ -155,10 +155,10 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | |||
155 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | 155 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); |
156 | } | 156 | } |
157 | 157 | ||
158 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); | 158 | int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); |
159 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); | 159 | int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); |
160 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 160 | struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
161 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 161 | struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
162 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); | 162 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); |
163 | int ioat2_enumerate_channels(struct ioatdma_device *device); | 163 | int ioat2_enumerate_channels(struct ioatdma_device *device); |
164 | struct dma_async_tx_descriptor * | 164 | struct dma_async_tx_descriptor * |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index e5fc944de1f..f519c93a61e 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -61,7 +61,6 @@ | |||
61 | #include <linux/dmaengine.h> | 61 | #include <linux/dmaengine.h> |
62 | #include <linux/dma-mapping.h> | 62 | #include <linux/dma-mapping.h> |
63 | #include <linux/prefetch.h> | 63 | #include <linux/prefetch.h> |
64 | #include "../dmaengine.h" | ||
65 | #include "registers.h" | 64 | #include "registers.h" |
66 | #include "hw.h" | 65 | #include "hw.h" |
67 | #include "dma.h" | 66 | #include "dma.h" |
@@ -257,7 +256,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) | |||
257 | * The difference from the dma_v2.c __cleanup() is that this routine | 256 | * The difference from the dma_v2.c __cleanup() is that this routine |
258 | * handles extended descriptors and dma-unmapping raid operations. | 257 | * handles extended descriptors and dma-unmapping raid operations. |
259 | */ | 258 | */ |
260 | static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | 259 | static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) |
261 | { | 260 | { |
262 | struct ioat_chan_common *chan = &ioat->base; | 261 | struct ioat_chan_common *chan = &ioat->base; |
263 | struct ioat_ring_ent *desc; | 262 | struct ioat_ring_ent *desc; |
@@ -278,8 +277,9 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
278 | dump_desc_dbg(ioat, desc); | 277 | dump_desc_dbg(ioat, desc); |
279 | tx = &desc->txd; | 278 | tx = &desc->txd; |
280 | if (tx->cookie) { | 279 | if (tx->cookie) { |
281 | dma_cookie_complete(tx); | 280 | chan->completed_cookie = tx->cookie; |
282 | ioat3_dma_unmap(ioat, desc, idx + i); | 281 | ioat3_dma_unmap(ioat, desc, idx + i); |
282 | tx->cookie = 0; | ||
283 | if (tx->callback) { | 283 | if (tx->callback) { |
284 | tx->callback(tx->callback_param); | 284 | tx->callback(tx->callback_param); |
285 | tx->callback = NULL; | 285 | tx->callback = NULL; |
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
314 | static void ioat3_cleanup(struct ioat2_dma_chan *ioat) | 314 | static void ioat3_cleanup(struct ioat2_dma_chan *ioat) |
315 | { | 315 | { |
316 | struct ioat_chan_common *chan = &ioat->base; | 316 | struct ioat_chan_common *chan = &ioat->base; |
317 | dma_addr_t phys_complete; | 317 | unsigned long phys_complete; |
318 | 318 | ||
319 | spin_lock_bh(&chan->cleanup_lock); | 319 | spin_lock_bh(&chan->cleanup_lock); |
320 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 320 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data) | |||
333 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | 333 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) |
334 | { | 334 | { |
335 | struct ioat_chan_common *chan = &ioat->base; | 335 | struct ioat_chan_common *chan = &ioat->base; |
336 | dma_addr_t phys_complete; | 336 | unsigned long phys_complete; |
337 | 337 | ||
338 | ioat2_quiesce(chan, 0); | 338 | ioat2_quiesce(chan, 0); |
339 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 339 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data) | |||
348 | struct ioat_chan_common *chan = &ioat->base; | 348 | struct ioat_chan_common *chan = &ioat->base; |
349 | 349 | ||
350 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | 350 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { |
351 | dma_addr_t phys_complete; | 351 | unsigned long phys_complete; |
352 | u64 status; | 352 | u64 status; |
353 | 353 | ||
354 | status = ioat_chansts(chan); | 354 | status = ioat_chansts(chan); |
@@ -411,15 +411,13 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
411 | struct dma_tx_state *txstate) | 411 | struct dma_tx_state *txstate) |
412 | { | 412 | { |
413 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 413 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
414 | enum dma_status ret; | ||
415 | 414 | ||
416 | ret = dma_cookie_status(c, cookie, txstate); | 415 | if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) |
417 | if (ret == DMA_SUCCESS) | 416 | return DMA_SUCCESS; |
418 | return ret; | ||
419 | 417 | ||
420 | ioat3_cleanup(ioat); | 418 | ioat3_cleanup(ioat); |
421 | 419 | ||
422 | return dma_cookie_status(c, cookie, txstate); | 420 | return ioat_tx_status(c, cookie, txstate); |
423 | } | 421 | } |
424 | 422 | ||
425 | static struct dma_async_tx_descriptor * | 423 | static struct dma_async_tx_descriptor * |
@@ -836,7 +834,7 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | |||
836 | return &desc->txd; | 834 | return &desc->txd; |
837 | } | 835 | } |
838 | 836 | ||
839 | static void ioat3_dma_test_callback(void *dma_async_param) | 837 | static void __devinit ioat3_dma_test_callback(void *dma_async_param) |
840 | { | 838 | { |
841 | struct completion *cmp = dma_async_param; | 839 | struct completion *cmp = dma_async_param; |
842 | 840 | ||
@@ -844,7 +842,7 @@ static void ioat3_dma_test_callback(void *dma_async_param) | |||
844 | } | 842 | } |
845 | 843 | ||
846 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | 844 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ |
847 | static int ioat_xor_val_self_test(struct ioatdma_device *device) | 845 | static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) |
848 | { | 846 | { |
849 | int i, src_idx; | 847 | int i, src_idx; |
850 | struct page *dest; | 848 | struct page *dest; |
@@ -1096,7 +1094,7 @@ out: | |||
1096 | return err; | 1094 | return err; |
1097 | } | 1095 | } |
1098 | 1096 | ||
1099 | static int ioat3_dma_self_test(struct ioatdma_device *device) | 1097 | static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) |
1100 | { | 1098 | { |
1101 | int rc = ioat_dma_self_test(device); | 1099 | int rc = ioat_dma_self_test(device); |
1102 | 1100 | ||
@@ -1149,45 +1147,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) | |||
1149 | return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | 1147 | return ioat2_reset_sync(chan, msecs_to_jiffies(200)); |
1150 | } | 1148 | } |
1151 | 1149 | ||
1152 | static bool is_jf_ioat(struct pci_dev *pdev) | 1150 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) |
1153 | { | ||
1154 | switch (pdev->device) { | ||
1155 | case PCI_DEVICE_ID_INTEL_IOAT_JSF0: | ||
1156 | case PCI_DEVICE_ID_INTEL_IOAT_JSF1: | ||
1157 | case PCI_DEVICE_ID_INTEL_IOAT_JSF2: | ||
1158 | case PCI_DEVICE_ID_INTEL_IOAT_JSF3: | ||
1159 | case PCI_DEVICE_ID_INTEL_IOAT_JSF4: | ||
1160 | case PCI_DEVICE_ID_INTEL_IOAT_JSF5: | ||
1161 | case PCI_DEVICE_ID_INTEL_IOAT_JSF6: | ||
1162 | case PCI_DEVICE_ID_INTEL_IOAT_JSF7: | ||
1163 | case PCI_DEVICE_ID_INTEL_IOAT_JSF8: | ||
1164 | case PCI_DEVICE_ID_INTEL_IOAT_JSF9: | ||
1165 | return true; | ||
1166 | default: | ||
1167 | return false; | ||
1168 | } | ||
1169 | } | ||
1170 | |||
1171 | static bool is_snb_ioat(struct pci_dev *pdev) | ||
1172 | { | ||
1173 | switch (pdev->device) { | ||
1174 | case PCI_DEVICE_ID_INTEL_IOAT_SNB0: | ||
1175 | case PCI_DEVICE_ID_INTEL_IOAT_SNB1: | ||
1176 | case PCI_DEVICE_ID_INTEL_IOAT_SNB2: | ||
1177 | case PCI_DEVICE_ID_INTEL_IOAT_SNB3: | ||
1178 | case PCI_DEVICE_ID_INTEL_IOAT_SNB4: | ||
1179 | case PCI_DEVICE_ID_INTEL_IOAT_SNB5: | ||
1180 | case PCI_DEVICE_ID_INTEL_IOAT_SNB6: | ||
1181 | case PCI_DEVICE_ID_INTEL_IOAT_SNB7: | ||
1182 | case PCI_DEVICE_ID_INTEL_IOAT_SNB8: | ||
1183 | case PCI_DEVICE_ID_INTEL_IOAT_SNB9: | ||
1184 | return true; | ||
1185 | default: | ||
1186 | return false; | ||
1187 | } | ||
1188 | } | ||
1189 | |||
1190 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) | ||
1191 | { | 1151 | { |
1192 | struct pci_dev *pdev = device->pdev; | 1152 | struct pci_dev *pdev = device->pdev; |
1193 | int dca_en = system_has_dca_enabled(pdev); | 1153 | int dca_en = system_has_dca_enabled(pdev); |
@@ -1207,9 +1167,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1207 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | 1167 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; |
1208 | dma->device_free_chan_resources = ioat2_free_chan_resources; | 1168 | dma->device_free_chan_resources = ioat2_free_chan_resources; |
1209 | 1169 | ||
1210 | if (is_jf_ioat(pdev) || is_snb_ioat(pdev)) | ||
1211 | dma->copy_align = 6; | ||
1212 | |||
1213 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | 1170 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); |
1214 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | 1171 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; |
1215 | 1172 | ||
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index d2ff3fda0b1..60e675455b6 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #define _IOAT_HW_H_ | 22 | #define _IOAT_HW_H_ |
23 | 23 | ||
24 | /* PCI Configuration Space Values */ | 24 | /* PCI Configuration Space Values */ |
25 | #define IOAT_PCI_VID 0x8086 | ||
25 | #define IOAT_MMIO_BAR 0 | 26 | #define IOAT_MMIO_BAR 0 |
26 | 27 | ||
27 | /* CB device ID's */ | 28 | /* CB device ID's */ |
@@ -30,6 +31,9 @@ | |||
30 | #define IOAT_PCI_DID_SCNB 0x65FF | 31 | #define IOAT_PCI_DID_SCNB 0x65FF |
31 | #define IOAT_PCI_DID_SNB 0x402F | 32 | #define IOAT_PCI_DID_SNB 0x402F |
32 | 33 | ||
34 | #define IOAT_PCI_RID 0x00 | ||
35 | #define IOAT_PCI_SVID 0x8086 | ||
36 | #define IOAT_PCI_SID 0x8086 | ||
33 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | 37 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ |
34 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | 38 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ |
35 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | 39 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ |
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 4f686c527ab..5e3a40f7994 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION); | |||
40 | MODULE_LICENSE("Dual BSD/GPL"); | 40 | MODULE_LICENSE("Dual BSD/GPL"); |
41 | MODULE_AUTHOR("Intel Corporation"); | 41 | MODULE_AUTHOR("Intel Corporation"); |
42 | 42 | ||
43 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 | ||
44 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 | ||
45 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 | ||
46 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 | ||
47 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 | ||
48 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 | ||
49 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 | ||
50 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 | ||
51 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e | ||
52 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f | ||
53 | |||
54 | static struct pci_device_id ioat_pci_tbl[] = { | 43 | static struct pci_device_id ioat_pci_tbl[] = { |
55 | /* I/OAT v1 platforms */ | 44 | /* I/OAT v1 platforms */ |
56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, | 45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, |
@@ -94,23 +83,13 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
94 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, | 83 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, |
95 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, | 84 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, |
96 | 85 | ||
97 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, | ||
98 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, | ||
99 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, | ||
100 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, | ||
101 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, | ||
102 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, | ||
103 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, | ||
104 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, | ||
105 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, | ||
106 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, | ||
107 | |||
108 | { 0, } | 86 | { 0, } |
109 | }; | 87 | }; |
110 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 88 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
111 | 89 | ||
112 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); | 90 | static int __devinit ioat_pci_probe(struct pci_dev *pdev, |
113 | static void ioat_remove(struct pci_dev *pdev); | 91 | const struct pci_device_id *id); |
92 | static void __devexit ioat_remove(struct pci_dev *pdev); | ||
114 | 93 | ||
115 | static int ioat_dca_enabled = 1; | 94 | static int ioat_dca_enabled = 1; |
116 | module_param(ioat_dca_enabled, int, 0644); | 95 | module_param(ioat_dca_enabled, int, 0644); |
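Beyond restoring __devinit/__devexit on the probe and remove declarations, this hunk and the two above drop the ten Ivy Bridge entries (PCI_DEVICE_ID_INTEL_IOAT_IVB0 through IVB9, device IDs 0x0e20 to 0x0e27 plus 0x0e2e and 0x0e2f) from ioat_pci_tbl, so the reverted driver simply will not bind to those I/OAT engines.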
@@ -124,7 +103,7 @@ static struct pci_driver ioat_pci_driver = { | |||
124 | .name = DRV_NAME, | 103 | .name = DRV_NAME, |
125 | .id_table = ioat_pci_tbl, | 104 | .id_table = ioat_pci_tbl, |
126 | .probe = ioat_pci_probe, | 105 | .probe = ioat_pci_probe, |
127 | .remove = ioat_remove, | 106 | .remove = __devexit_p(ioat_remove), |
128 | }; | 107 | }; |
129 | 108 | ||
130 | static struct ioatdma_device * | 109 | static struct ioatdma_device * |
@@ -140,7 +119,7 @@ alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) | |||
140 | return d; | 119 | return d; |
141 | } | 120 | } |
142 | 121 | ||
143 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 122 | static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
144 | { | 123 | { |
145 | void __iomem * const *iomap; | 124 | void __iomem * const *iomap; |
146 | struct device *dev = &pdev->dev; | 125 | struct device *dev = &pdev->dev; |
@@ -194,7 +173,7 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
194 | return 0; | 173 | return 0; |
195 | } | 174 | } |
196 | 175 | ||
197 | static void ioat_remove(struct pci_dev *pdev) | 176 | static void __devexit ioat_remove(struct pci_dev *pdev) |
198 | { | 177 | { |
199 | struct ioatdma_device *device = pci_get_drvdata(pdev); | 178 | struct ioatdma_device *device = pci_get_drvdata(pdev); |
200 | 179 | ||
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index eacb8be9981..e03f811a83d 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -36,8 +36,6 @@ | |||
36 | 36 | ||
37 | #include <mach/adma.h> | 37 | #include <mach/adma.h> |
38 | 38 | ||
39 | #include "dmaengine.h" | ||
40 | |||
41 | #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) | 39 | #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) |
42 | #define to_iop_adma_device(dev) \ | 40 | #define to_iop_adma_device(dev) \ |
43 | container_of(dev, struct iop_adma_device, common) | 41 | container_of(dev, struct iop_adma_device, common) |
@@ -319,7 +317,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
319 | } | 317 | } |
320 | 318 | ||
321 | if (cookie > 0) { | 319 | if (cookie > 0) { |
322 | iop_chan->common.completed_cookie = cookie; | 320 | iop_chan->completed_cookie = cookie; |
323 | pr_debug("\tcompleted cookie %d\n", cookie); | 321 | pr_debug("\tcompleted cookie %d\n", cookie); |
324 | } | 322 | } |
325 | } | 323 | } |
@@ -440,6 +438,18 @@ retry: | |||
440 | return NULL; | 438 | return NULL; |
441 | } | 439 | } |
442 | 440 | ||
441 | static dma_cookie_t | ||
442 | iop_desc_assign_cookie(struct iop_adma_chan *iop_chan, | ||
443 | struct iop_adma_desc_slot *desc) | ||
444 | { | ||
445 | dma_cookie_t cookie = iop_chan->common.cookie; | ||
446 | cookie++; | ||
447 | if (cookie < 0) | ||
448 | cookie = 1; | ||
449 | iop_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
450 | return cookie; | ||
451 | } | ||
452 | |||
443 | static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) | 453 | static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) |
444 | { | 454 | { |
445 | dev_dbg(iop_chan->device->common.dev, "pending: %d\n", | 455 | dev_dbg(iop_chan->device->common.dev, "pending: %d\n", |
@@ -467,7 +477,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
467 | slots_per_op = grp_start->slots_per_op; | 477 | slots_per_op = grp_start->slots_per_op; |
468 | 478 | ||
469 | spin_lock_bh(&iop_chan->lock); | 479 | spin_lock_bh(&iop_chan->lock); |
470 | cookie = dma_cookie_assign(tx); | 480 | cookie = iop_desc_assign_cookie(iop_chan, sw_desc); |
471 | 481 | ||
472 | old_chain_tail = list_entry(iop_chan->chain.prev, | 482 | old_chain_tail = list_entry(iop_chan->chain.prev, |
473 | struct iop_adma_desc_slot, chain_node); | 483 | struct iop_adma_desc_slot, chain_node); |
@@ -894,15 +904,24 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, | |||
894 | struct dma_tx_state *txstate) | 904 | struct dma_tx_state *txstate) |
895 | { | 905 | { |
896 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | 906 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); |
897 | int ret; | 907 | dma_cookie_t last_used; |
898 | 908 | dma_cookie_t last_complete; | |
899 | ret = dma_cookie_status(chan, cookie, txstate); | 909 | enum dma_status ret; |
910 | |||
911 | last_used = chan->cookie; | ||
912 | last_complete = iop_chan->completed_cookie; | ||
913 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
914 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
900 | if (ret == DMA_SUCCESS) | 915 | if (ret == DMA_SUCCESS) |
901 | return ret; | 916 | return ret; |
902 | 917 | ||
903 | iop_adma_slot_cleanup(iop_chan); | 918 | iop_adma_slot_cleanup(iop_chan); |
904 | 919 | ||
905 | return dma_cookie_status(chan, cookie, txstate); | 920 | last_used = chan->cookie; |
921 | last_complete = iop_chan->completed_cookie; | ||
922 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
923 | |||
924 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
906 | } | 925 | } |
907 | 926 | ||
908 | static irqreturn_t iop_adma_eot_handler(int irq, void *data) | 927 | static irqreturn_t iop_adma_eot_handler(int irq, void *data) |
@@ -968,7 +987,7 @@ static void iop_adma_issue_pending(struct dma_chan *chan) | |||
968 | */ | 987 | */ |
969 | #define IOP_ADMA_TEST_SIZE 2000 | 988 | #define IOP_ADMA_TEST_SIZE 2000 |
970 | 989 | ||
971 | static int iop_adma_memcpy_self_test(struct iop_adma_device *device) | 990 | static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) |
972 | { | 991 | { |
973 | int i; | 992 | int i; |
974 | void *src, *dest; | 993 | void *src, *dest; |
@@ -1042,7 +1061,7 @@ out: | |||
1042 | } | 1061 | } |
1043 | 1062 | ||
1044 | #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ | 1063 | #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ |
1045 | static int | 1064 | static int __devinit |
1046 | iop_adma_xor_val_self_test(struct iop_adma_device *device) | 1065 | iop_adma_xor_val_self_test(struct iop_adma_device *device) |
1047 | { | 1066 | { |
1048 | int i, src_idx; | 1067 | int i, src_idx; |
@@ -1243,7 +1262,7 @@ out: | |||
1243 | } | 1262 | } |
1244 | 1263 | ||
1245 | #ifdef CONFIG_RAID6_PQ | 1264 | #ifdef CONFIG_RAID6_PQ |
1246 | static int | 1265 | static int __devinit |
1247 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | 1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) |
1248 | { | 1267 | { |
1249 | /* combined sources, software pq results, and extra hw pq results */ | 1268 | /* combined sources, software pq results, and extra hw pq results */ |
@@ -1252,8 +1271,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
1252 | struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; | 1271 | struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; |
1253 | /* address conversion buffers (dma_map / page_address) */ | 1272 | /* address conversion buffers (dma_map / page_address) */ |
1254 | void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; | 1273 | void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; |
1255 | dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2]; | 1274 | dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST]; |
1256 | dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST]; | 1275 | dma_addr_t pq_dest[2]; |
1257 | 1276 | ||
1258 | int i; | 1277 | int i; |
1259 | struct dma_async_tx_descriptor *tx; | 1278 | struct dma_async_tx_descriptor *tx; |
@@ -1406,7 +1425,7 @@ out: | |||
1406 | } | 1425 | } |
1407 | #endif | 1426 | #endif |
1408 | 1427 | ||
1409 | static int iop_adma_remove(struct platform_device *dev) | 1428 | static int __devexit iop_adma_remove(struct platform_device *dev) |
1410 | { | 1429 | { |
1411 | struct iop_adma_device *device = platform_get_drvdata(dev); | 1430 | struct iop_adma_device *device = platform_get_drvdata(dev); |
1412 | struct dma_chan *chan, *_chan; | 1431 | struct dma_chan *chan, *_chan; |
@@ -1429,7 +1448,7 @@ static int iop_adma_remove(struct platform_device *dev) | |||
1429 | return 0; | 1448 | return 0; |
1430 | } | 1449 | } |
1431 | 1450 | ||
1432 | static int iop_adma_probe(struct platform_device *pdev) | 1451 | static int __devinit iop_adma_probe(struct platform_device *pdev) |
1433 | { | 1452 | { |
1434 | struct resource *res; | 1453 | struct resource *res; |
1435 | int ret = 0, i; | 1454 | int ret = 0, i; |
@@ -1463,7 +1482,7 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1463 | goto err_free_adev; | 1482 | goto err_free_adev; |
1464 | } | 1483 | } |
1465 | 1484 | ||
1466 | dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n", | 1485 | dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", |
1467 | __func__, adev->dma_desc_pool_virt, | 1486 | __func__, adev->dma_desc_pool_virt, |
1468 | (void *) adev->dma_desc_pool); | 1487 | (void *) adev->dma_desc_pool); |
1469 | 1488 | ||
@@ -1546,7 +1565,6 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1546 | INIT_LIST_HEAD(&iop_chan->chain); | 1565 | INIT_LIST_HEAD(&iop_chan->chain); |
1547 | INIT_LIST_HEAD(&iop_chan->all_slots); | 1566 | INIT_LIST_HEAD(&iop_chan->all_slots); |
1548 | iop_chan->common.device = dma_dev; | 1567 | iop_chan->common.device = dma_dev; |
1549 | dma_cookie_init(&iop_chan->common); | ||
1550 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); | 1568 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); |
1551 | 1569 | ||
1552 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | 1570 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { |
@@ -1624,12 +1642,16 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1624 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); | 1642 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); |
1625 | iop_desc_set_memcpy_src_addr(grp_start, 0); | 1643 | iop_desc_set_memcpy_src_addr(grp_start, 0); |
1626 | 1644 | ||
1627 | cookie = dma_cookie_assign(&sw_desc->async_tx); | 1645 | cookie = iop_chan->common.cookie; |
1646 | cookie++; | ||
1647 | if (cookie <= 1) | ||
1648 | cookie = 2; | ||
1628 | 1649 | ||
1629 | /* initialize the completed cookie to be less than | 1650 | /* initialize the completed cookie to be less than |
1630 | * the most recently used cookie | 1651 | * the most recently used cookie |
1631 | */ | 1652 | */ |
1632 | iop_chan->common.completed_cookie = cookie - 1; | 1653 | iop_chan->completed_cookie = cookie - 1; |
1654 | iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; | ||
1633 | 1655 | ||
1634 | /* channel should not be busy */ | 1656 | /* channel should not be busy */ |
1635 | BUG_ON(iop_chan_is_busy(iop_chan)); | 1657 | BUG_ON(iop_chan_is_busy(iop_chan)); |
@@ -1677,12 +1699,16 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1677 | iop_desc_set_xor_src_addr(grp_start, 0, 0); | 1699 | iop_desc_set_xor_src_addr(grp_start, 0, 0); |
1678 | iop_desc_set_xor_src_addr(grp_start, 1, 0); | 1700 | iop_desc_set_xor_src_addr(grp_start, 1, 0); |
1679 | 1701 | ||
1680 | cookie = dma_cookie_assign(&sw_desc->async_tx); | 1702 | cookie = iop_chan->common.cookie; |
1703 | cookie++; | ||
1704 | if (cookie <= 1) | ||
1705 | cookie = 2; | ||
1681 | 1706 | ||
1682 | /* initialize the completed cookie to be less than | 1707 | /* initialize the completed cookie to be less than |
1683 | * the most recently used cookie | 1708 | * the most recently used cookie |
1684 | */ | 1709 | */ |
1685 | iop_chan->common.completed_cookie = cookie - 1; | 1710 | iop_chan->completed_cookie = cookie - 1; |
1711 | iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; | ||
1686 | 1712 | ||
1687 | /* channel should not be busy */ | 1713 | /* channel should not be busy */ |
1688 | BUG_ON(iop_chan_is_busy(iop_chan)); | 1714 | BUG_ON(iop_chan_is_busy(iop_chan)); |
@@ -1709,18 +1735,30 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1709 | spin_unlock_bh(&iop_chan->lock); | 1735 | spin_unlock_bh(&iop_chan->lock); |
1710 | } | 1736 | } |
1711 | 1737 | ||
1738 | MODULE_ALIAS("platform:iop-adma"); | ||
1739 | |||
1712 | static struct platform_driver iop_adma_driver = { | 1740 | static struct platform_driver iop_adma_driver = { |
1713 | .probe = iop_adma_probe, | 1741 | .probe = iop_adma_probe, |
1714 | .remove = iop_adma_remove, | 1742 | .remove = __devexit_p(iop_adma_remove), |
1715 | .driver = { | 1743 | .driver = { |
1716 | .owner = THIS_MODULE, | 1744 | .owner = THIS_MODULE, |
1717 | .name = "iop-adma", | 1745 | .name = "iop-adma", |
1718 | }, | 1746 | }, |
1719 | }; | 1747 | }; |
1720 | 1748 | ||
1721 | module_platform_driver(iop_adma_driver); | 1749 | static int __init iop_adma_init (void) |
1750 | { | ||
1751 | return platform_driver_register(&iop_adma_driver); | ||
1752 | } | ||
1753 | |||
1754 | static void __exit iop_adma_exit (void) | ||
1755 | { | ||
1756 | platform_driver_unregister(&iop_adma_driver); | ||
1757 | return; | ||
1758 | } | ||
1759 | module_exit(iop_adma_exit); | ||
1760 | module_init(iop_adma_init); | ||
1722 | 1761 | ||
1723 | MODULE_AUTHOR("Intel Corporation"); | 1762 | MODULE_AUTHOR("Intel Corporation"); |
1724 | MODULE_DESCRIPTION("IOP ADMA Engine Driver"); | 1763 | MODULE_DESCRIPTION("IOP ADMA Engine Driver"); |
1725 | MODULE_LICENSE("GPL"); | 1764 | MODULE_LICENSE("GPL"); |
1726 | MODULE_ALIAS("platform:iop-adma"); | ||
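The registration hunk above trades the module_platform_driver() one-liner for an explicit init/exit pair. For reference, the helper macro expands to roughly the same code, so the two forms register the driver identically; a sketch of the expansion (generated function names approximate):

    /* module_platform_driver(iop_adma_driver) is roughly equivalent to: */
    static int __init iop_adma_driver_init(void)
    {
            return platform_driver_register(&iop_adma_driver);
    }
    module_init(iop_adma_driver_init);

    static void __exit iop_adma_driver_exit(void)
    {
            platform_driver_unregister(&iop_adma_driver);
    }
    module_exit(iop_adma_driver_exit);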
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 65855373cee..6815905a772 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -21,10 +21,9 @@ | |||
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | #include <linux/module.h> | ||
25 | #include <linux/dma/ipu-dma.h> | ||
26 | 24 | ||
27 | #include "../dmaengine.h" | 25 | #include <mach/ipu.h> |
26 | |||
28 | #include "ipu_intern.h" | 27 | #include "ipu_intern.h" |
29 | 28 | ||
30 | #define FS_VF_IN_VALID 0x00000002 | 29 | #define FS_VF_IN_VALID 0x00000002 |
@@ -312,7 +311,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
312 | case IPU_PIX_FMT_RGB565: | 311 | case IPU_PIX_FMT_RGB565: |
313 | params->ip.bpp = 2; | 312 | params->ip.bpp = 2; |
314 | params->ip.pfs = 4; | 313 | params->ip.pfs = 4; |
315 | params->ip.npb = 15; | 314 | params->ip.npb = 7; |
316 | params->ip.sat = 2; /* SAT = 32-bit access */ | 315 | params->ip.sat = 2; /* SAT = 32-bit access */ |
317 | params->ip.ofs0 = 0; /* Red bit offset */ | 316 | params->ip.ofs0 = 0; /* Red bit offset */ |
318 | params->ip.ofs1 = 5; /* Green bit offset */ | 317 | params->ip.ofs1 = 5; /* Green bit offset */ |
@@ -422,6 +421,12 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
422 | params->pp.nsb = 1; | 421 | params->pp.nsb = 1; |
423 | } | 422 | } |
424 | 423 | ||
424 | static void ipu_ch_param_set_burst_size(union chan_param_mem *params, | ||
425 | uint16_t burst_pixels) | ||
426 | { | ||
427 | params->pp.npb = burst_pixels - 1; | ||
428 | } | ||
429 | |||
425 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, | 430 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, |
426 | dma_addr_t buf0, dma_addr_t buf1) | 431 | dma_addr_t buf0, dma_addr_t buf1) |
427 | { | 432 | { |
@@ -684,6 +689,23 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, | |||
684 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); | 689 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); |
685 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); | 690 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); |
686 | ipu_ch_param_set_rotation(¶ms, rot_mode); | 691 | ipu_ch_param_set_rotation(¶ms, rot_mode); |
692 | /* Some channels (rotation) have restriction on burst length */ | ||
693 | switch (channel) { | ||
694 | case IDMAC_IC_7: /* Hangs with burst 8, 16, other values | ||
695 | invalid - Table 44-30 */ | ||
696 | /* | ||
697 | ipu_ch_param_set_burst_size(¶ms, 8); | ||
698 | */ | ||
699 | break; | ||
700 | case IDMAC_SDC_0: | ||
701 | case IDMAC_SDC_1: | ||
702 | /* In original code only IPU_PIX_FMT_RGB565 was setting burst */ | ||
703 | ipu_ch_param_set_burst_size(¶ms, 16); | ||
704 | break; | ||
705 | case IDMAC_IC_0: | ||
706 | default: | ||
707 | break; | ||
708 | } | ||
687 | 709 | ||
688 | spin_lock_irqsave(&ipu->lock, flags); | 710 | spin_lock_irqsave(&ipu->lock, flags); |
689 | 711 | ||
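The hunk above reintroduces ipu_ch_param_set_burst_size() and lowers the RGB565 burst from 16 pixels (npb = 15) to 8 (npb = 7), then forces 16-pixel bursts on the SDC channels. The NPB field stores the burst length minus one; a tiny sketch of that encoding (helper name is illustrative):

    #include <stdint.h>

    /* NPB holds (pixels per burst) - 1: 7 -> 8-pixel bursts, 15 -> 16. */
    static inline uint16_t npb_field_from_burst(uint16_t burst_pixels)
    {
            return burst_pixels - 1;
    }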
@@ -866,7 +888,14 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) | |||
866 | 888 | ||
867 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); | 889 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); |
868 | 890 | ||
869 | cookie = dma_cookie_assign(tx); | 891 | cookie = ichan->dma_chan.cookie; |
892 | |||
893 | if (++cookie < 0) | ||
894 | cookie = 1; | ||
895 | |||
896 | /* from dmaengine.h: "last cookie value returned to client" */ | ||
897 | ichan->dma_chan.cookie = cookie; | ||
898 | tx->cookie = cookie; | ||
870 | 899 | ||
871 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ | 900 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ |
872 | spin_lock_irqsave(&ichan->lock, flags); | 901 | spin_lock_irqsave(&ichan->lock, flags); |
@@ -1278,7 +1307,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1278 | ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { | 1307 | ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { |
1279 | callback = descnew->txd.callback; | 1308 | callback = descnew->txd.callback; |
1280 | callback_param = descnew->txd.callback_param; | 1309 | callback_param = descnew->txd.callback_param; |
1281 | list_del_init(&descnew->list); | ||
1282 | spin_unlock(&ichan->lock); | 1310 | spin_unlock(&ichan->lock); |
1283 | if (callback) | 1311 | if (callback) |
1284 | callback(callback_param); | 1312 | callback(callback_param); |
@@ -1288,7 +1316,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1288 | /* Flip the active buffer - even if update above failed */ | 1316 | /* Flip the active buffer - even if update above failed */ |
1289 | ichan->active_buffer = !ichan->active_buffer; | 1317 | ichan->active_buffer = !ichan->active_buffer; |
1290 | if (done) | 1318 | if (done) |
1291 | dma_cookie_complete(&desc->txd); | 1319 | ichan->completed = desc->txd.cookie; |
1292 | 1320 | ||
1293 | callback = desc->txd.callback; | 1321 | callback = desc->txd.callback; |
1294 | callback_param = desc->txd.callback_param; | 1322 | callback_param = desc->txd.callback_param; |
@@ -1334,8 +1362,7 @@ static void ipu_gc_tasklet(unsigned long arg) | |||
1334 | /* Allocate and initialise a transfer descriptor. */ | 1362 | /* Allocate and initialise a transfer descriptor. */ |
1335 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, | 1363 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, |
1336 | struct scatterlist *sgl, unsigned int sg_len, | 1364 | struct scatterlist *sgl, unsigned int sg_len, |
1337 | enum dma_transfer_direction direction, unsigned long tx_flags, | 1365 | enum dma_data_direction direction, unsigned long tx_flags) |
1338 | void *context) | ||
1339 | { | 1366 | { |
1340 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1367 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1341 | struct idmac_tx_desc *desc = NULL; | 1368 | struct idmac_tx_desc *desc = NULL; |
@@ -1347,7 +1374,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan | |||
1347 | chan->chan_id != IDMAC_IC_7) | 1374 | chan->chan_id != IDMAC_IC_7) |
1348 | return NULL; | 1375 | return NULL; |
1349 | 1376 | ||
1350 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { | 1377 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { |
1351 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); | 1378 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); |
1352 | return NULL; | 1379 | return NULL; |
1353 | } | 1380 | } |
@@ -1401,58 +1428,39 @@ static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1401 | { | 1428 | { |
1402 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1429 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1403 | struct idmac *idmac = to_idmac(chan->device); | 1430 | struct idmac *idmac = to_idmac(chan->device); |
1404 | struct ipu *ipu = to_ipu(idmac); | ||
1405 | struct list_head *list, *tmp; | ||
1406 | unsigned long flags; | 1431 | unsigned long flags; |
1407 | int i; | 1432 | int i; |
1408 | 1433 | ||
1409 | switch (cmd) { | 1434 | /* Only supports DMA_TERMINATE_ALL */ |
1410 | case DMA_PAUSE: | 1435 | if (cmd != DMA_TERMINATE_ALL) |
1411 | spin_lock_irqsave(&ipu->lock, flags); | 1436 | return -ENXIO; |
1412 | ipu_ic_disable_task(ipu, chan->chan_id); | ||
1413 | |||
1414 | /* Return all descriptors into "prepared" state */ | ||
1415 | list_for_each_safe(list, tmp, &ichan->queue) | ||
1416 | list_del_init(list); | ||
1417 | 1437 | ||
1418 | ichan->sg[0] = NULL; | 1438 | ipu_disable_channel(idmac, ichan, |
1419 | ichan->sg[1] = NULL; | 1439 | ichan->status >= IPU_CHANNEL_ENABLED); |
1420 | 1440 | ||
1421 | spin_unlock_irqrestore(&ipu->lock, flags); | 1441 | tasklet_disable(&to_ipu(idmac)->tasklet); |
1422 | |||
1423 | ichan->status = IPU_CHANNEL_INITIALIZED; | ||
1424 | break; | ||
1425 | case DMA_TERMINATE_ALL: | ||
1426 | ipu_disable_channel(idmac, ichan, | ||
1427 | ichan->status >= IPU_CHANNEL_ENABLED); | ||
1428 | |||
1429 | tasklet_disable(&ipu->tasklet); | ||
1430 | 1442 | ||
1431 | /* ichan->queue is modified in ISR, have to spinlock */ | 1443 | /* ichan->queue is modified in ISR, have to spinlock */ |
1432 | spin_lock_irqsave(&ichan->lock, flags); | 1444 | spin_lock_irqsave(&ichan->lock, flags); |
1433 | list_splice_init(&ichan->queue, &ichan->free_list); | 1445 | list_splice_init(&ichan->queue, &ichan->free_list); |
1434 | 1446 | ||
1435 | if (ichan->desc) | 1447 | if (ichan->desc) |
1436 | for (i = 0; i < ichan->n_tx_desc; i++) { | 1448 | for (i = 0; i < ichan->n_tx_desc; i++) { |
1437 | struct idmac_tx_desc *desc = ichan->desc + i; | 1449 | struct idmac_tx_desc *desc = ichan->desc + i; |
1438 | if (list_empty(&desc->list)) | 1450 | if (list_empty(&desc->list)) |
1439 | /* Descriptor was prepared, but not submitted */ | 1451 | /* Descriptor was prepared, but not submitted */ |
1440 | list_add(&desc->list, &ichan->free_list); | 1452 | list_add(&desc->list, &ichan->free_list); |
1441 | 1453 | ||
1442 | async_tx_clear_ack(&desc->txd); | 1454 | async_tx_clear_ack(&desc->txd); |
1443 | } | 1455 | } |
1444 | 1456 | ||
1445 | ichan->sg[0] = NULL; | 1457 | ichan->sg[0] = NULL; |
1446 | ichan->sg[1] = NULL; | 1458 | ichan->sg[1] = NULL; |
1447 | spin_unlock_irqrestore(&ichan->lock, flags); | 1459 | spin_unlock_irqrestore(&ichan->lock, flags); |
1448 | 1460 | ||
1449 | tasklet_enable(&ipu->tasklet); | 1461 | tasklet_enable(&to_ipu(idmac)->tasklet); |
1450 | 1462 | ||
1451 | ichan->status = IPU_CHANNEL_INITIALIZED; | 1463 | ichan->status = IPU_CHANNEL_INITIALIZED; |
1452 | break; | ||
1453 | default: | ||
1454 | return -ENOSYS; | ||
1455 | } | ||
1456 | 1464 | ||
1457 | return 0; | 1465 | return 0; |
1458 | } | 1466 | } |
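With this hunk the control hook handles only DMA_TERMINATE_ALL again; the DMA_PAUSE branch that quiesced the IC task and requeued descriptors is gone. From a client's perspective both versions are reached through the generic dmaengine wrapper; a hedged usage sketch (helper name is illustrative, the wrapper comes from <linux/dmaengine.h>):

    /* Ends up in __idmac_control(chan, DMA_TERMINATE_ALL, 0). */
    static void stop_idmac_channel(struct dma_chan *chan)
    {
            dmaengine_terminate_all(chan);
    }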
@@ -1504,7 +1512,8 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) | |||
1504 | BUG_ON(chan->client_count > 1); | 1512 | BUG_ON(chan->client_count > 1); |
1505 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); | 1513 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); |
1506 | 1514 | ||
1507 | dma_cookie_init(chan); | 1515 | chan->cookie = 1; |
1516 | ichan->completed = -ENXIO; | ||
1508 | 1517 | ||
1509 | ret = ipu_irq_map(chan->chan_id); | 1518 | ret = ipu_irq_map(chan->chan_id); |
1510 | if (ret < 0) | 1519 | if (ret < 0) |
@@ -1593,7 +1602,9 @@ static void idmac_free_chan_resources(struct dma_chan *chan) | |||
1593 | static enum dma_status idmac_tx_status(struct dma_chan *chan, | 1602 | static enum dma_status idmac_tx_status(struct dma_chan *chan, |
1594 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 1603 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
1595 | { | 1604 | { |
1596 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); | 1605 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1606 | |||
1607 | dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); | ||
1597 | if (cookie != chan->cookie) | 1608 | if (cookie != chan->cookie) |
1598 | return DMA_ERROR; | 1609 | return DMA_ERROR; |
1599 | return DMA_SUCCESS; | 1610 | return DMA_SUCCESS; |
@@ -1629,10 +1640,11 @@ static int __init ipu_idmac_init(struct ipu *ipu) | |||
1629 | 1640 | ||
1630 | ichan->status = IPU_CHANNEL_FREE; | 1641 | ichan->status = IPU_CHANNEL_FREE; |
1631 | ichan->sec_chan_en = false; | 1642 | ichan->sec_chan_en = false; |
1643 | ichan->completed = -ENXIO; | ||
1632 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); | 1644 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); |
1633 | 1645 | ||
1634 | dma_chan->device = &idmac->dma; | 1646 | dma_chan->device = &idmac->dma; |
1635 | dma_cookie_init(dma_chan); | 1647 | dma_chan->cookie = 1; |
1636 | dma_chan->chan_id = i; | 1648 | dma_chan->chan_id = i; |
1637 | list_add_tail(&dma_chan->device_node, &dma->channels); | 1649 | list_add_tail(&dma_chan->device_node, &dma->channels); |
1638 | } | 1650 | } |
@@ -1651,6 +1663,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu) | |||
1651 | struct idmac_channel *ichan = ipu->channel + i; | 1663 | struct idmac_channel *ichan = ipu->channel + i; |
1652 | 1664 | ||
1653 | idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); | 1665 | idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); |
1666 | idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); | ||
1654 | } | 1667 | } |
1655 | 1668 | ||
1656 | dma_async_device_unregister(&idmac->dma); | 1669 | dma_async_device_unregister(&idmac->dma); |
@@ -1662,6 +1675,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu) | |||
1662 | 1675 | ||
1663 | static int __init ipu_probe(struct platform_device *pdev) | 1676 | static int __init ipu_probe(struct platform_device *pdev) |
1664 | { | 1677 | { |
1678 | struct ipu_platform_data *pdata = pdev->dev.platform_data; | ||
1665 | struct resource *mem_ipu, *mem_ic; | 1679 | struct resource *mem_ipu, *mem_ic; |
1666 | int ret; | 1680 | int ret; |
1667 | 1681 | ||
@@ -1669,7 +1683,7 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1669 | 1683 | ||
1670 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1684 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1671 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1685 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1672 | if (!mem_ipu || !mem_ic) | 1686 | if (!pdata || !mem_ipu || !mem_ic) |
1673 | return -EINVAL; | 1687 | return -EINVAL; |
1674 | 1688 | ||
1675 | ipu_data.dev = &pdev->dev; | 1689 | ipu_data.dev = &pdev->dev; |
@@ -1686,9 +1700,10 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1686 | goto err_noirq; | 1700 | goto err_noirq; |
1687 | 1701 | ||
1688 | ipu_data.irq_err = ret; | 1702 | ipu_data.irq_err = ret; |
1703 | ipu_data.irq_base = pdata->irq_base; | ||
1689 | 1704 | ||
1690 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n", | 1705 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", |
1691 | ipu_data.irq_fn, ipu_data.irq_err); | 1706 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); |
1692 | 1707 | ||
1693 | /* Remap IPU common registers */ | 1708 | /* Remap IPU common registers */ |
1694 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); | 1709 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); |
@@ -1712,7 +1727,7 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1712 | } | 1727 | } |
1713 | 1728 | ||
1714 | /* Make sure IPU HSP clock is running */ | 1729 | /* Make sure IPU HSP clock is running */ |
1715 | clk_prepare_enable(ipu_data.ipu_clk); | 1730 | clk_enable(ipu_data.ipu_clk); |
1716 | 1731 | ||
1717 | /* Disable all interrupts */ | 1732 | /* Disable all interrupts */ |
1718 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); | 1733 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); |
@@ -1744,7 +1759,7 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1744 | err_idmac_init: | 1759 | err_idmac_init: |
1745 | err_attach_irq: | 1760 | err_attach_irq: |
1746 | ipu_irq_detach_irq(&ipu_data, pdev); | 1761 | ipu_irq_detach_irq(&ipu_data, pdev); |
1747 | clk_disable_unprepare(ipu_data.ipu_clk); | 1762 | clk_disable(ipu_data.ipu_clk); |
1748 | clk_put(ipu_data.ipu_clk); | 1763 | clk_put(ipu_data.ipu_clk); |
1749 | err_clk_get: | 1764 | err_clk_get: |
1750 | iounmap(ipu_data.reg_ic); | 1765 | iounmap(ipu_data.reg_ic); |
@@ -1762,7 +1777,7 @@ static int __exit ipu_remove(struct platform_device *pdev) | |||
1762 | 1777 | ||
1763 | ipu_idmac_exit(ipu); | 1778 | ipu_idmac_exit(ipu); |
1764 | ipu_irq_detach_irq(ipu, pdev); | 1779 | ipu_irq_detach_irq(ipu, pdev); |
1765 | clk_disable_unprepare(ipu->ipu_clk); | 1780 | clk_disable(ipu->ipu_clk); |
1766 | clk_put(ipu->ipu_clk); | 1781 | clk_put(ipu->ipu_clk); |
1767 | iounmap(ipu->reg_ic); | 1782 | iounmap(ipu->reg_ic); |
1768 | iounmap(ipu->reg_ipu); | 1783 | iounmap(ipu->reg_ipu); |
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index a5ee37d5320..ab8a4eff072 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -14,8 +14,8 @@ | |||
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/module.h> | 17 | |
18 | #include <linux/dma/ipu-dma.h> | 18 | #include <mach/ipu.h> |
19 | 19 | ||
20 | #include "ipu_intern.h" | 20 | #include "ipu_intern.h" |
21 | 21 | ||
@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS]; | |||
81 | /* Protects allocations from the above array of maps */ | 81 | /* Protects allocations from the above array of maps */ |
82 | static DEFINE_MUTEX(map_lock); | 82 | static DEFINE_MUTEX(map_lock); |
83 | /* Protects register accesses and individual mappings */ | 83 | /* Protects register accesses and individual mappings */ |
84 | static DEFINE_RAW_SPINLOCK(bank_lock); | 84 | static DEFINE_SPINLOCK(bank_lock); |
85 | 85 | ||
86 | static struct ipu_irq_map *src2map(unsigned int src) | 86 | static struct ipu_irq_map *src2map(unsigned int src) |
87 | { | 87 | { |
@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_data *d) | |||
101 | uint32_t reg; | 101 | uint32_t reg; |
102 | unsigned long lock_flags; | 102 | unsigned long lock_flags; |
103 | 103 | ||
104 | raw_spin_lock_irqsave(&bank_lock, lock_flags); | 104 | spin_lock_irqsave(&bank_lock, lock_flags); |
105 | 105 | ||
106 | bank = map->bank; | 106 | bank = map->bank; |
107 | if (!bank) { | 107 | if (!bank) { |
108 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 108 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); | 109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
110 | return; | 110 | return; |
111 | } | 111 | } |
@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_data *d) | |||
114 | reg |= (1UL << (map->source & 31)); | 114 | reg |= (1UL << (map->source & 31)); |
115 | ipu_write_reg(bank->ipu, reg, bank->control); | 115 | ipu_write_reg(bank->ipu, reg, bank->control); |
116 | 116 | ||
117 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 117 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void ipu_irq_mask(struct irq_data *d) | 120 | static void ipu_irq_mask(struct irq_data *d) |
@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data *d) | |||
124 | uint32_t reg; | 124 | uint32_t reg; |
125 | unsigned long lock_flags; | 125 | unsigned long lock_flags; |
126 | 126 | ||
127 | raw_spin_lock_irqsave(&bank_lock, lock_flags); | 127 | spin_lock_irqsave(&bank_lock, lock_flags); |
128 | 128 | ||
129 | bank = map->bank; | 129 | bank = map->bank; |
130 | if (!bank) { | 130 | if (!bank) { |
131 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 131 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); | 132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
133 | return; | 133 | return; |
134 | } | 134 | } |
@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data *d) | |||
137 | reg &= ~(1UL << (map->source & 31)); | 137 | reg &= ~(1UL << (map->source & 31)); |
138 | ipu_write_reg(bank->ipu, reg, bank->control); | 138 | ipu_write_reg(bank->ipu, reg, bank->control); |
139 | 139 | ||
140 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 140 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void ipu_irq_ack(struct irq_data *d) | 143 | static void ipu_irq_ack(struct irq_data *d) |
@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data *d) | |||
146 | struct ipu_irq_bank *bank; | 146 | struct ipu_irq_bank *bank; |
147 | unsigned long lock_flags; | 147 | unsigned long lock_flags; |
148 | 148 | ||
149 | raw_spin_lock_irqsave(&bank_lock, lock_flags); | 149 | spin_lock_irqsave(&bank_lock, lock_flags); |
150 | 150 | ||
151 | bank = map->bank; | 151 | bank = map->bank; |
152 | if (!bank) { | 152 | if (!bank) { |
153 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 153 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); | 154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
155 | return; | 155 | return; |
156 | } | 156 | } |
157 | 157 | ||
158 | ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); | 158 | ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); |
159 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 159 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
160 | } | 160 | } |
161 | 161 | ||
162 | /** | 162 | /** |
@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq) | |||
172 | unsigned long lock_flags; | 172 | unsigned long lock_flags; |
173 | bool ret; | 173 | bool ret; |
174 | 174 | ||
175 | raw_spin_lock_irqsave(&bank_lock, lock_flags); | 175 | spin_lock_irqsave(&bank_lock, lock_flags); |
176 | bank = map->bank; | 176 | bank = map->bank; |
177 | ret = bank && ipu_read_reg(bank->ipu, bank->status) & | 177 | ret = bank && ipu_read_reg(bank->ipu, bank->status) & |
178 | (1UL << (map->source & 31)); | 178 | (1UL << (map->source & 31)); |
179 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 179 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
180 | 180 | ||
181 | return ret; | 181 | return ret; |
182 | } | 182 | } |
@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source) | |||
213 | if (irq_map[i].source < 0) { | 213 | if (irq_map[i].source < 0) { |
214 | unsigned long lock_flags; | 214 | unsigned long lock_flags; |
215 | 215 | ||
216 | raw_spin_lock_irqsave(&bank_lock, lock_flags); | 216 | spin_lock_irqsave(&bank_lock, lock_flags); |
217 | irq_map[i].source = source; | 217 | irq_map[i].source = source; |
218 | irq_map[i].bank = irq_bank + source / 32; | 218 | irq_map[i].bank = irq_bank + source / 32; |
219 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 219 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
220 | 220 | ||
221 | ret = irq_map[i].irq; | 221 | ret = irq_map[i].irq; |
222 | pr_debug("IPU: mapped source %u to IRQ %u\n", | 222 | pr_debug("IPU: mapped source %u to IRQ %u\n", |
@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source) | |||
252 | pr_debug("IPU: unmapped source %u from IRQ %u\n", | 252 | pr_debug("IPU: unmapped source %u from IRQ %u\n", |
253 | source, irq_map[i].irq); | 253 | source, irq_map[i].irq); |
254 | 254 | ||
255 | raw_spin_lock_irqsave(&bank_lock, lock_flags); | 255 | spin_lock_irqsave(&bank_lock, lock_flags); |
256 | irq_map[i].source = -EINVAL; | 256 | irq_map[i].source = -EINVAL; |
257 | irq_map[i].bank = NULL; | 257 | irq_map[i].bank = NULL; |
258 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); | 258 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
259 | 259 | ||
260 | ret = 0; | 260 | ret = 0; |
261 | break; | 261 | break; |
@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
276 | for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { | 276 | for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { |
277 | struct ipu_irq_bank *bank = irq_bank + i; | 277 | struct ipu_irq_bank *bank = irq_bank + i; |
278 | 278 | ||
279 | raw_spin_lock(&bank_lock); | 279 | spin_lock(&bank_lock); |
280 | status = ipu_read_reg(ipu, bank->status); | 280 | status = ipu_read_reg(ipu, bank->status); |
281 | /* | 281 | /* |
282 | * Don't think we have to clear all interrupts here, they will | 282 | * Don't think we have to clear all interrupts here, they will |
@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
284 | * might want to clear unhandled interrupts after the loop... | 284 | * might want to clear unhandled interrupts after the loop... |
285 | */ | 285 | */ |
286 | status &= ipu_read_reg(ipu, bank->control); | 286 | status &= ipu_read_reg(ipu, bank->control); |
287 | raw_spin_unlock(&bank_lock); | 287 | spin_unlock(&bank_lock); |
288 | while ((line = ffs(status))) { | 288 | while ((line = ffs(status))) { |
289 | struct ipu_irq_map *map; | 289 | struct ipu_irq_map *map; |
290 | 290 | ||
291 | line--; | 291 | line--; |
292 | status &= ~(1UL << line); | 292 | status &= ~(1UL << line); |
293 | 293 | ||
294 | raw_spin_lock(&bank_lock); | 294 | spin_lock(&bank_lock); |
295 | map = src2map(32 * i + line); | 295 | map = src2map(32 * i + line); |
296 | if (map) | 296 | if (map) |
297 | irq = map->irq; | 297 | irq = map->irq; |
298 | raw_spin_unlock(&bank_lock); | 298 | spin_unlock(&bank_lock); |
299 | 299 | ||
300 | if (!map) { | 300 | if (!map) { |
301 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", | 301 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", |
@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | |||
317 | for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { | 317 | for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { |
318 | struct ipu_irq_bank *bank = irq_bank + i; | 318 | struct ipu_irq_bank *bank = irq_bank + i; |
319 | 319 | ||
320 | raw_spin_lock(&bank_lock); | 320 | spin_lock(&bank_lock); |
321 | status = ipu_read_reg(ipu, bank->status); | 321 | status = ipu_read_reg(ipu, bank->status); |
322 | /* Not clearing all interrupts, see above */ | 322 | /* Not clearing all interrupts, see above */ |
323 | status &= ipu_read_reg(ipu, bank->control); | 323 | status &= ipu_read_reg(ipu, bank->control); |
324 | raw_spin_unlock(&bank_lock); | 324 | spin_unlock(&bank_lock); |
325 | while ((line = ffs(status))) { | 325 | while ((line = ffs(status))) { |
326 | struct ipu_irq_map *map; | 326 | struct ipu_irq_map *map; |
327 | 327 | ||
328 | line--; | 328 | line--; |
329 | status &= ~(1UL << line); | 329 | status &= ~(1UL << line); |
330 | 330 | ||
331 | raw_spin_lock(&bank_lock); | 331 | spin_lock(&bank_lock); |
332 | map = src2map(32 * i + line); | 332 | map = src2map(32 * i + line); |
333 | if (map) | 333 | if (map) |
334 | irq = map->irq; | 334 | irq = map->irq; |
335 | raw_spin_unlock(&bank_lock); | 335 | spin_unlock(&bank_lock); |
336 | 336 | ||
337 | if (!map) { | 337 | if (!map) { |
338 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", | 338 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", |
@@ -354,12 +354,10 @@ static struct irq_chip ipu_irq_chip = { | |||
354 | /* Install the IRQ handler */ | 354 | /* Install the IRQ handler */ |
355 | int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | 355 | int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) |
356 | { | 356 | { |
357 | unsigned int irq, i; | 357 | struct ipu_platform_data *pdata = dev->dev.platform_data; |
358 | int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS, | 358 | unsigned int irq, irq_base, i; |
359 | numa_node_id()); | ||
360 | 359 | ||
361 | if (irq_base < 0) | 360 | irq_base = pdata->irq_base; |
362 | return irq_base; | ||
363 | 361 | ||
364 | for (i = 0; i < IPU_IRQ_NR_BANKS; i++) | 362 | for (i = 0; i < IPU_IRQ_NR_BANKS; i++) |
365 | irq_bank[i].ipu = ipu; | 363 | irq_bank[i].ipu = ipu; |
@@ -389,16 +387,15 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | |||
389 | irq_set_handler_data(ipu->irq_err, ipu); | 387 | irq_set_handler_data(ipu->irq_err, ipu); |
390 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); | 388 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); |
391 | 389 | ||
392 | ipu->irq_base = irq_base; | ||
393 | |||
394 | return 0; | 390 | return 0; |
395 | } | 391 | } |
396 | 392 | ||
397 | void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) | 393 | void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) |
398 | { | 394 | { |
395 | struct ipu_platform_data *pdata = dev->dev.platform_data; | ||
399 | unsigned int irq, irq_base; | 396 | unsigned int irq, irq_base; |
400 | 397 | ||
401 | irq_base = ipu->irq_base; | 398 | irq_base = pdata->irq_base; |
402 | 399 | ||
403 | irq_set_chained_handler(ipu->irq_fn, NULL); | 400 | irq_set_chained_handler(ipu->irq_fn, NULL); |
404 | irq_set_handler_data(ipu->irq_fn, NULL); | 401 | irq_set_handler_data(ipu->irq_fn, NULL); |
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c deleted file mode 100644 index c6d98c00f05..00000000000 --- a/drivers/dma/mmp_pdma.c +++ /dev/null | |||
@@ -1,875 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Marvell International Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/platform_data/mmp_dma.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/of_device.h> | ||
20 | #include <linux/of.h> | ||
21 | |||
22 | #include "dmaengine.h" | ||
23 | |||
24 | #define DCSR 0x0000 | ||
25 | #define DALGN 0x00a0 | ||
26 | #define DINT 0x00f0 | ||
27 | #define DDADR 0x0200 | ||
28 | #define DSADR 0x0204 | ||
29 | #define DTADR 0x0208 | ||
30 | #define DCMD 0x020c | ||
31 | |||
32 | #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ | ||
33 | #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ | ||
34 | #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ | ||
35 | #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ | ||
36 | #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ | ||
37 | #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ | ||
38 | #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ | ||
39 | #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ | ||
40 | |||
41 | #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ | ||
42 | #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ | ||
43 | #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ | ||
44 | #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ | ||
45 | #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ | ||
46 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ | ||
47 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ | ||
48 | |||
49 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ | ||
50 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | ||
51 | |||
52 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ | ||
53 | #define DDADR_STOP (1 << 0) /* Stop (read / write) */ | ||
54 | |||
55 | #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ | ||
56 | #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ | ||
57 | #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ | ||
58 | #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ | ||
59 | #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ | ||
60 | #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ | ||
61 | #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ | ||
62 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ | ||
63 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ | ||
64 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ | ||
65 | #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ | ||
66 | #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ | ||
67 | #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ | ||
68 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ | ||
69 | |||
70 | #define PDMA_ALIGNMENT 3 | ||
71 | #define PDMA_MAX_DESC_BYTES 0x1000 | ||
72 | |||
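The DCMD bits above are combined per descriptor; a worked example grounded in mmp_pdma_prep_memcpy() further down, computing the command word for a plain memory-to-memory transfer (function name is illustrative):

    /* Increment both addresses, 32-byte bursts, low 13 bits = length.
     * len must not exceed DCMD_LENGTH (8 KiB - 1); the driver caps each
     * descriptor at PDMA_MAX_DESC_BYTES anyway. */
    static u32 memcpy_dcmd(u32 len)
    {
            return DCMD_INCSRCADDR | DCMD_INCTRGADDR | DCMD_BURST32 |
                   (DCMD_LENGTH & len);
    }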
73 | struct mmp_pdma_desc_hw { | ||
74 | u32 ddadr; /* Points to the next descriptor + flags */ | ||
75 | u32 dsadr; /* DSADR value for the current transfer */ | ||
76 | u32 dtadr; /* DTADR value for the current transfer */ | ||
77 | u32 dcmd; /* DCMD value for the current transfer */ | ||
78 | } __aligned(32); | ||
79 | |||
80 | struct mmp_pdma_desc_sw { | ||
81 | struct mmp_pdma_desc_hw desc; | ||
82 | struct list_head node; | ||
83 | struct list_head tx_list; | ||
84 | struct dma_async_tx_descriptor async_tx; | ||
85 | }; | ||
86 | |||
87 | struct mmp_pdma_phy; | ||
88 | |||
89 | struct mmp_pdma_chan { | ||
90 | struct device *dev; | ||
91 | struct dma_chan chan; | ||
92 | struct dma_async_tx_descriptor desc; | ||
93 | struct mmp_pdma_phy *phy; | ||
94 | enum dma_transfer_direction dir; | ||
95 | |||
96 | /* channel's basic info */ | ||
97 | struct tasklet_struct tasklet; | ||
98 | u32 dcmd; | ||
99 | u32 drcmr; | ||
100 | u32 dev_addr; | ||
101 | |||
102 | /* list for desc */ | ||
103 | spinlock_t desc_lock; /* Descriptor list lock */ | ||
104 | struct list_head chain_pending; /* Link descriptors queue for pending */ | ||
105 | struct list_head chain_running; /* Link descriptors queue for running */ | ||
106 | bool idle; /* channel state machine */ | ||
107 | |||
108 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
109 | }; | ||
110 | |||
111 | struct mmp_pdma_phy { | ||
112 | int idx; | ||
113 | void __iomem *base; | ||
114 | struct mmp_pdma_chan *vchan; | ||
115 | }; | ||
116 | |||
117 | struct mmp_pdma_device { | ||
118 | int dma_channels; | ||
119 | void __iomem *base; | ||
120 | struct device *dev; | ||
121 | struct dma_device device; | ||
122 | struct mmp_pdma_phy *phy; | ||
123 | }; | ||
124 | |||
125 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) | ||
126 | #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) | ||
127 | #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) | ||
128 | #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) | ||
129 | |||
130 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | ||
131 | { | ||
132 | u32 reg = (phy->idx << 4) + DDADR; | ||
133 | |||
134 | writel(addr, phy->base + reg); | ||
135 | } | ||
136 | |||
137 | static void enable_chan(struct mmp_pdma_phy *phy) | ||
138 | { | ||
139 | u32 reg; | ||
140 | |||
141 | if (!phy->vchan) | ||
142 | return; | ||
143 | |||
144 | reg = phy->vchan->drcmr; | ||
145 | reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2); | ||
146 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); | ||
147 | |||
148 | reg = (phy->idx << 2) + DCSR; | ||
149 | writel(readl(phy->base + reg) | DCSR_RUN, | ||
150 | phy->base + reg); | ||
151 | } | ||
152 | |||
153 | static void disable_chan(struct mmp_pdma_phy *phy) | ||
154 | { | ||
155 | u32 reg; | ||
156 | |||
157 | if (phy) { | ||
158 | reg = (phy->idx << 2) + DCSR; | ||
159 | writel(readl(phy->base + reg) & ~DCSR_RUN, | ||
160 | phy->base + reg); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | static int clear_chan_irq(struct mmp_pdma_phy *phy) | ||
165 | { | ||
166 | u32 dcsr; | ||
167 | u32 dint = readl(phy->base + DINT); | ||
168 | u32 reg = (phy->idx << 2) + DCSR; | ||
169 | |||
170 | if (dint & BIT(phy->idx)) { | ||
171 | /* clear irq */ | ||
172 | dcsr = readl(phy->base + reg); | ||
173 | writel(dcsr, phy->base + reg); | ||
174 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) | ||
175 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); | ||
176 | return 0; | ||
177 | } | ||
178 | return -EAGAIN; | ||
179 | } | ||
180 | |||
181 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) | ||
182 | { | ||
183 | struct mmp_pdma_phy *phy = dev_id; | ||
184 | |||
185 | if (clear_chan_irq(phy) == 0) { | ||
186 | tasklet_schedule(&phy->vchan->tasklet); | ||
187 | return IRQ_HANDLED; | ||
188 | } else | ||
189 | return IRQ_NONE; | ||
190 | } | ||
191 | |||
192 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | ||
193 | { | ||
194 | struct mmp_pdma_device *pdev = dev_id; | ||
195 | struct mmp_pdma_phy *phy; | ||
196 | u32 dint = readl(pdev->base + DINT); | ||
197 | int i, ret; | ||
198 | int irq_num = 0; | ||
199 | |||
200 | while (dint) { | ||
201 | i = __ffs(dint); | ||
202 | dint &= (dint - 1); | ||
203 | phy = &pdev->phy[i]; | ||
204 | ret = mmp_pdma_chan_handler(irq, phy); | ||
205 | if (ret == IRQ_HANDLED) | ||
206 | irq_num++; | ||
207 | } | ||
208 | |||
209 | if (irq_num) | ||
210 | return IRQ_HANDLED; | ||
211 | else | ||
212 | return IRQ_NONE; | ||
213 | } | ||
214 | |||
215 | /* lookup free phy channel as descending priority */ | ||
216 | static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | ||
217 | { | ||
218 | int prio, i; | ||
219 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); | ||
220 | struct mmp_pdma_phy *phy; | ||
221 | |||
222 | /* | ||
223 | * dma channel priorities | ||
224 | * ch 0 - 3, 16 - 19 <--> (0) | ||
225 | * ch 4 - 7, 20 - 23 <--> (1) | ||
226 | * ch 8 - 11, 24 - 27 <--> (2) | ||
227 | * ch 12 - 15, 28 - 31 <--> (3) | ||
228 | */ | ||
229 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { | ||
230 | for (i = 0; i < pdev->dma_channels; i++) { | ||
231 | if (prio != ((i & 0xf) >> 2)) | ||
232 | continue; | ||
233 | phy = &pdev->phy[i]; | ||
234 | if (!phy->vchan) { | ||
235 | phy->vchan = pchan; | ||
236 | return phy; | ||
237 | } | ||
238 | } | ||
239 | } | ||
240 | |||
241 | return NULL; | ||
242 | } | ||
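lookup_phy() maps a channel index to its priority group with (i & 0xf) >> 2, matching the table in the comment. A worked example of that arithmetic (helper name is illustrative):

    static int pdma_prio(int i)
    {
            return (i & 0xf) >> 2;
    }
    /* pdma_prio(2)  == 0  -> group ch 0-3  / 16-19
     * pdma_prio(21) == 1  -> group ch 4-7  / 20-23  (21 & 0xf = 5,  5 >> 2 = 1)
     * pdma_prio(30) == 3  -> group ch 12-15 / 28-31 (30 & 0xf = 14, 14 >> 2 = 3) */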
243 | |||
244 | /* desc->tx_list ==> pending list */ | ||
245 | static void append_pending_queue(struct mmp_pdma_chan *chan, | ||
246 | struct mmp_pdma_desc_sw *desc) | ||
247 | { | ||
248 | struct mmp_pdma_desc_sw *tail = | ||
249 | to_mmp_pdma_desc(chan->chain_pending.prev); | ||
250 | |||
251 | if (list_empty(&chan->chain_pending)) | ||
252 | goto out_splice; | ||
253 | |||
254 | /* one irq per queue, even appended */ | ||
255 | tail->desc.ddadr = desc->async_tx.phys; | ||
256 | tail->desc.dcmd &= ~DCMD_ENDIRQEN; | ||
257 | |||
258 | /* softly link to pending list */ | ||
259 | out_splice: | ||
260 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * start_pending_queue - transfer any pending transactions | ||
265 | * pending list ==> running list | ||
266 | */ | ||
267 | static void start_pending_queue(struct mmp_pdma_chan *chan) | ||
268 | { | ||
269 | struct mmp_pdma_desc_sw *desc; | ||
270 | |||
271 | /* still in running, irq will start the pending list */ | ||
272 | if (!chan->idle) { | ||
273 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | if (list_empty(&chan->chain_pending)) { | ||
278 | /* chance to re-fetch phy channel with higher prio */ | ||
279 | if (chan->phy) { | ||
280 | chan->phy->vchan = NULL; | ||
281 | chan->phy = NULL; | ||
282 | } | ||
283 | dev_dbg(chan->dev, "no pending list\n"); | ||
284 | return; | ||
285 | } | ||
286 | |||
287 | if (!chan->phy) { | ||
288 | chan->phy = lookup_phy(chan); | ||
289 | if (!chan->phy) { | ||
290 | dev_dbg(chan->dev, "no free dma channel\n"); | ||
291 | return; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * pending -> running | ||
297 | * reinitialize pending list | ||
298 | */ | ||
299 | desc = list_first_entry(&chan->chain_pending, | ||
300 | struct mmp_pdma_desc_sw, node); | ||
301 | list_splice_tail_init(&chan->chain_pending, &chan->chain_running); | ||
302 | |||
303 | /* | ||
304 | * Program the descriptor's address into the DMA controller, | ||
305 | * then start the DMA transaction | ||
306 | */ | ||
307 | set_desc(chan->phy, desc->async_tx.phys); | ||
308 | enable_chan(chan->phy); | ||
309 | chan->idle = false; | ||
310 | } | ||
311 | |||
312 | |||
313 | /* desc->tx_list ==> pending list */ | ||
314 | static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
315 | { | ||
316 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); | ||
317 | struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); | ||
318 | struct mmp_pdma_desc_sw *child; | ||
319 | unsigned long flags; | ||
320 | dma_cookie_t cookie = -EBUSY; | ||
321 | |||
322 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
323 | |||
324 | list_for_each_entry(child, &desc->tx_list, node) { | ||
325 | cookie = dma_cookie_assign(&child->async_tx); | ||
326 | } | ||
327 | |||
328 | append_pending_queue(chan, desc); | ||
329 | |||
330 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
331 | |||
332 | return cookie; | ||
333 | } | ||
334 | |||
335 | struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | ||
336 | { | ||
337 | struct mmp_pdma_desc_sw *desc; | ||
338 | dma_addr_t pdesc; | ||
339 | |||
340 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
341 | if (!desc) { | ||
342 | dev_err(chan->dev, "out of memory for link descriptor\n"); | ||
343 | return NULL; | ||
344 | } | ||
345 | |||
346 | memset(desc, 0, sizeof(*desc)); | ||
347 | INIT_LIST_HEAD(&desc->tx_list); | ||
348 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | ||
349 | /* each desc has submit */ | ||
350 | desc->async_tx.tx_submit = mmp_pdma_tx_submit; | ||
351 | desc->async_tx.phys = pdesc; | ||
352 | |||
353 | return desc; | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. | ||
358 | * | ||
359 | * This function will create a dma pool for descriptor allocation. | ||
360 | * Request irq only when channel is requested | ||
361 | * Return - The number of allocated descriptors. | ||
362 | */ | ||
363 | |||
364 | static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | ||
365 | { | ||
366 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
367 | |||
368 | if (chan->desc_pool) | ||
369 | return 1; | ||
370 | |||
371 | chan->desc_pool = | ||
372 | dma_pool_create(dev_name(&dchan->dev->device), chan->dev, | ||
373 | sizeof(struct mmp_pdma_desc_sw), | ||
374 | __alignof__(struct mmp_pdma_desc_sw), 0); | ||
375 | if (!chan->desc_pool) { | ||
376 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | if (chan->phy) { | ||
380 | chan->phy->vchan = NULL; | ||
381 | chan->phy = NULL; | ||
382 | } | ||
383 | chan->idle = true; | ||
384 | chan->dev_addr = 0; | ||
385 | return 1; | ||
386 | } | ||
387 | |||
388 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, | ||
389 | struct list_head *list) | ||
390 | { | ||
391 | struct mmp_pdma_desc_sw *desc, *_desc; | ||
392 | |||
393 | list_for_each_entry_safe(desc, _desc, list, node) { | ||
394 | list_del(&desc->node); | ||
395 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) | ||
400 | { | ||
401 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
402 | unsigned long flags; | ||
403 | |||
404 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
405 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
406 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
407 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
408 | |||
409 | dma_pool_destroy(chan->desc_pool); | ||
410 | chan->desc_pool = NULL; | ||
411 | chan->idle = true; | ||
412 | chan->dev_addr = 0; | ||
413 | if (chan->phy) { | ||
414 | chan->phy->vchan = NULL; | ||
415 | chan->phy = NULL; | ||
416 | } | ||
417 | return; | ||
418 | } | ||
419 | |||
420 | static struct dma_async_tx_descriptor * | ||
421 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, | ||
422 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
423 | size_t len, unsigned long flags) | ||
424 | { | ||
425 | struct mmp_pdma_chan *chan; | ||
426 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | ||
427 | size_t copy = 0; | ||
428 | |||
429 | if (!dchan) | ||
430 | return NULL; | ||
431 | |||
432 | if (!len) | ||
433 | return NULL; | ||
434 | |||
435 | chan = to_mmp_pdma_chan(dchan); | ||
436 | |||
437 | if (!chan->dir) { | ||
438 | chan->dir = DMA_MEM_TO_MEM; | ||
439 | chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; | ||
440 | chan->dcmd |= DCMD_BURST32; | ||
441 | } | ||
442 | |||
443 | do { | ||
444 | /* Allocate the link descriptor from DMA pool */ | ||
445 | new = mmp_pdma_alloc_descriptor(chan); | ||
446 | if (!new) { | ||
447 | dev_err(chan->dev, "no memory for desc\n"); | ||
448 | goto fail; | ||
449 | } | ||
450 | |||
451 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | ||
452 | |||
453 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); | ||
454 | new->desc.dsadr = dma_src; | ||
455 | new->desc.dtadr = dma_dst; | ||
456 | |||
457 | if (!first) | ||
458 | first = new; | ||
459 | else | ||
460 | prev->desc.ddadr = new->async_tx.phys; | ||
461 | |||
462 | new->async_tx.cookie = 0; | ||
463 | async_tx_ack(&new->async_tx); | ||
464 | |||
465 | prev = new; | ||
466 | len -= copy; | ||
467 | |||
468 | if (chan->dir == DMA_MEM_TO_DEV) { | ||
469 | dma_src += copy; | ||
470 | } else if (chan->dir == DMA_DEV_TO_MEM) { | ||
471 | dma_dst += copy; | ||
472 | } else if (chan->dir == DMA_MEM_TO_MEM) { | ||
473 | dma_src += copy; | ||
474 | dma_dst += copy; | ||
475 | } | ||
476 | |||
477 | /* Insert the link descriptor to the LD ring */ | ||
478 | list_add_tail(&new->node, &first->tx_list); | ||
479 | } while (len); | ||
480 | |||
481 | first->async_tx.flags = flags; /* client is in control of this ack */ | ||
482 | first->async_tx.cookie = -EBUSY; | ||
483 | |||
484 | /* last desc and fire IRQ */ | ||
485 | new->desc.ddadr = DDADR_STOP; | ||
486 | new->desc.dcmd |= DCMD_ENDIRQEN; | ||
487 | |||
488 | return &first->async_tx; | ||
489 | |||
490 | fail: | ||
491 | if (first) | ||
492 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
493 | return NULL; | ||
494 | } | ||
495 | |||
496 | static struct dma_async_tx_descriptor * | ||
497 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | ||
498 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
499 | unsigned long flags, void *context) | ||
500 | { | ||
501 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
502 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; | ||
503 | size_t len, avail; | ||
504 | struct scatterlist *sg; | ||
505 | dma_addr_t addr; | ||
506 | int i; | ||
507 | |||
508 | if ((sgl == NULL) || (sg_len == 0)) | ||
509 | return NULL; | ||
510 | |||
511 | for_each_sg(sgl, sg, sg_len, i) { | ||
512 | addr = sg_dma_address(sg); | ||
513 | avail = sg_dma_len(sgl); | ||
514 | |||
515 | do { | ||
516 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | ||
517 | |||
518 | /* allocate and populate the descriptor */ | ||
519 | new = mmp_pdma_alloc_descriptor(chan); | ||
520 | if (!new) { | ||
521 | dev_err(chan->dev, "no memory for desc\n"); | ||
522 | goto fail; | ||
523 | } | ||
524 | |||
525 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); | ||
526 | if (dir == DMA_MEM_TO_DEV) { | ||
527 | new->desc.dsadr = addr; | ||
528 | new->desc.dtadr = chan->dev_addr; | ||
529 | } else { | ||
530 | new->desc.dsadr = chan->dev_addr; | ||
531 | new->desc.dtadr = addr; | ||
532 | } | ||
533 | |||
534 | if (!first) | ||
535 | first = new; | ||
536 | else | ||
537 | prev->desc.ddadr = new->async_tx.phys; | ||
538 | |||
539 | new->async_tx.cookie = 0; | ||
540 | async_tx_ack(&new->async_tx); | ||
541 | prev = new; | ||
542 | |||
543 | /* Insert the link descriptor to the LD ring */ | ||
544 | list_add_tail(&new->node, &first->tx_list); | ||
545 | |||
546 | /* update metadata */ | ||
547 | addr += len; | ||
548 | avail -= len; | ||
549 | } while (avail); | ||
550 | } | ||
551 | |||
552 | first->async_tx.cookie = -EBUSY; | ||
553 | first->async_tx.flags = flags; | ||
554 | |||
555 | /* last desc and fire IRQ */ | ||
556 | new->desc.ddadr = DDADR_STOP; | ||
557 | new->desc.dcmd |= DCMD_ENDIRQEN; | ||
558 | |||
559 | return &first->async_tx; | ||
560 | |||
561 | fail: | ||
562 | if (first) | ||
563 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
564 | return NULL; | ||
565 | } | ||
566 | |||
567 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | ||
568 | unsigned long arg) | ||
569 | { | ||
570 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
571 | struct dma_slave_config *cfg = (void *)arg; | ||
572 | unsigned long flags; | ||
573 | int ret = 0; | ||
574 | u32 maxburst = 0, addr = 0; | ||
575 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
576 | |||
577 | if (!dchan) | ||
578 | return -EINVAL; | ||
579 | |||
580 | switch (cmd) { | ||
581 | case DMA_TERMINATE_ALL: | ||
582 | disable_chan(chan->phy); | ||
583 | if (chan->phy) { | ||
584 | chan->phy->vchan = NULL; | ||
585 | chan->phy = NULL; | ||
586 | } | ||
587 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
588 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
589 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
590 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
591 | chan->idle = true; | ||
592 | break; | ||
593 | case DMA_SLAVE_CONFIG: | ||
594 | if (cfg->direction == DMA_DEV_TO_MEM) { | ||
595 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; | ||
596 | maxburst = cfg->src_maxburst; | ||
597 | width = cfg->src_addr_width; | ||
598 | addr = cfg->src_addr; | ||
599 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | ||
600 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; | ||
601 | maxburst = cfg->dst_maxburst; | ||
602 | width = cfg->dst_addr_width; | ||
603 | addr = cfg->dst_addr; | ||
604 | } | ||
605 | |||
606 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
607 | chan->dcmd |= DCMD_WIDTH1; | ||
608 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
609 | chan->dcmd |= DCMD_WIDTH2; | ||
610 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
611 | chan->dcmd |= DCMD_WIDTH4; | ||
612 | |||
613 | if (maxburst == 8) | ||
614 | chan->dcmd |= DCMD_BURST8; | ||
615 | else if (maxburst == 16) | ||
616 | chan->dcmd |= DCMD_BURST16; | ||
617 | else if (maxburst == 32) | ||
618 | chan->dcmd |= DCMD_BURST32; | ||
619 | |||
620 | if (cfg) { | ||
621 | chan->dir = cfg->direction; | ||
622 | chan->drcmr = cfg->slave_id; | ||
623 | } | ||
624 | chan->dev_addr = addr; | ||
625 | break; | ||
626 | default: | ||
627 | return -ENOSYS; | ||
628 | } | ||
629 | |||
630 | return ret; | ||
631 | } | ||
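The DMA_SLAVE_CONFIG branch above translates a client's dma_slave_config into DCMD bits and a DRCMR request line. A hedged example of a configuration it accepts; the FIFO address and request line are invented for illustration, and the per-field comments show the mapping performed by the code:

    struct dma_slave_config cfg = {
            .direction      = DMA_MEM_TO_DEV,              /* DCMD_INCSRCADDR | DCMD_FLOWTRG */
            .dst_addr       = 0xd4037000,                  /* hypothetical device FIFO */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,  /* DCMD_WIDTH4 */
            .dst_maxburst   = 32,                          /* DCMD_BURST32 */
            .slave_id       = 20,                          /* hypothetical DRCMR line */
    };
    /* dmaengine_slave_config(chan, &cfg) routes here via DMA_SLAVE_CONFIG. */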
632 | |||
633 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | ||
634 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
635 | { | ||
636 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
637 | enum dma_status ret; | ||
638 | unsigned long flags; | ||
639 | |||
640 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
641 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
642 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
643 | |||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * mmp_pdma_issue_pending - Issue the DMA start command | ||
649 | * pending list ==> running list | ||
650 | */ | ||
651 | static void mmp_pdma_issue_pending(struct dma_chan *dchan) | ||
652 | { | ||
653 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
654 | unsigned long flags; | ||
655 | |||
656 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
657 | start_pending_queue(chan); | ||
658 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * dma_do_tasklet | ||
663 | * Do call back | ||
664 | * Start pending list | ||
665 | */ | ||
666 | static void dma_do_tasklet(unsigned long data) | ||
667 | { | ||
668 | struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data; | ||
669 | struct mmp_pdma_desc_sw *desc, *_desc; | ||
670 | LIST_HEAD(chain_cleanup); | ||
671 | unsigned long flags; | ||
672 | |||
673 | /* submit pending list; callback for each desc; free desc */ | ||
674 | |||
675 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
676 | |||
677 | /* update the cookie if we have some descriptors to cleanup */ | ||
678 | if (!list_empty(&chan->chain_running)) { | ||
679 | dma_cookie_t cookie; | ||
680 | |||
681 | desc = to_mmp_pdma_desc(chan->chain_running.prev); | ||
682 | cookie = desc->async_tx.cookie; | ||
683 | dma_cookie_complete(&desc->async_tx); | ||
684 | |||
685 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * move the descriptors to a temporary list so we can drop the lock | ||
690 | * during the entire cleanup operation | ||
691 | */ | ||
692 | list_splice_tail_init(&chan->chain_running, &chain_cleanup); | ||
693 | |||
694 | /* the hardware is now idle and ready for more */ | ||
695 | chan->idle = true; | ||
696 | |||
697 | /* Start any pending transactions automatically */ | ||
698 | start_pending_queue(chan); | ||
699 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
700 | |||
701 | /* Run the callback for each descriptor, in order */ | ||
702 | list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { | ||
703 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | ||
704 | |||
705 | /* Remove from the list of transactions */ | ||
706 | list_del(&desc->node); | ||
707 | /* Run the link descriptor callback function */ | ||
708 | if (txd->callback) | ||
709 | txd->callback(txd->callback_param); | ||
710 | |||
711 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
712 | } | ||
713 | } | ||
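dma_do_tasklet() completes the last running cookie, splices the finished descriptors onto a private list, restarts anything pending, and only then runs the client callbacks outside the lock. A hedged client-side sketch of that callback path; everything except the dmaengine/completion API is illustrative:

    static void mmp_copy_done(void *arg)
    {
            complete(arg);                  /* invoked from dma_do_tasklet() */
    }

    static void submit_and_wait(struct dma_chan *chan,
                                struct dma_async_tx_descriptor *desc)
    {
            DECLARE_COMPLETION_ONSTACK(done);

            desc->callback = mmp_copy_done;
            desc->callback_param = &done;
            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            wait_for_completion(&done);
    }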
714 | |||
715 | static int mmp_pdma_remove(struct platform_device *op) | ||
716 | { | ||
717 | struct mmp_pdma_device *pdev = platform_get_drvdata(op); | ||
718 | |||
719 | dma_async_device_unregister(&pdev->device); | ||
720 | return 0; | ||
721 | } | ||
722 | |||
723 | static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | ||
724 | int idx, int irq) | ||
725 | { | ||
726 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; | ||
727 | struct mmp_pdma_chan *chan; | ||
728 | int ret; | ||
729 | |||
730 | chan = devm_kzalloc(pdev->dev, | ||
731 | sizeof(struct mmp_pdma_chan), GFP_KERNEL); | ||
732 | if (chan == NULL) | ||
733 | return -ENOMEM; | ||
734 | |||
735 | phy->idx = idx; | ||
736 | phy->base = pdev->base; | ||
737 | |||
738 | if (irq) { | ||
739 | ret = devm_request_irq(pdev->dev, irq, | ||
740 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); | ||
741 | if (ret) { | ||
742 | dev_err(pdev->dev, "failed to request channel IRQ\n"); | ||
743 | return ret; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | spin_lock_init(&chan->desc_lock); | ||
748 | chan->dev = pdev->dev; | ||
749 | chan->chan.device = &pdev->device; | ||
750 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | ||
751 | INIT_LIST_HEAD(&chan->chain_pending); | ||
752 | INIT_LIST_HEAD(&chan->chain_running); | ||
753 | |||
754 | /* register the virtual channel with the dma engine */ | ||
755 | list_add_tail(&chan->chan.device_node, | ||
756 | &pdev->device.channels); | ||
757 | |||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | static struct of_device_id mmp_pdma_dt_ids[] = { | ||
762 | { .compatible = "marvell,pdma-1.0", }, | ||
763 | {} | ||
764 | }; | ||
765 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); | ||
766 | |||
767 | static int mmp_pdma_probe(struct platform_device *op) | ||
768 | { | ||
769 | struct mmp_pdma_device *pdev; | ||
770 | const struct of_device_id *of_id; | ||
771 | struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); | ||
772 | struct resource *iores; | ||
773 | int i, ret, irq = 0; | ||
774 | int dma_channels = 0, irq_num = 0; | ||
775 | |||
776 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); | ||
777 | if (!pdev) | ||
778 | return -ENOMEM; | ||
779 | pdev->dev = &op->dev; | ||
780 | |||
781 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | ||
782 | if (!iores) | ||
783 | return -EINVAL; | ||
784 | |||
785 | pdev->base = devm_request_and_ioremap(pdev->dev, iores); | ||
786 | if (!pdev->base) | ||
787 | return -EADDRNOTAVAIL; | ||
788 | |||
789 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); | ||
790 | if (of_id) | ||
791 | of_property_read_u32(pdev->dev->of_node, | ||
792 | "#dma-channels", &dma_channels); | ||
793 | else if (pdata && pdata->dma_channels) | ||
794 | dma_channels = pdata->dma_channels; | ||
795 | else | ||
796 | dma_channels = 32; /* default to 32 channels */ | ||
797 | pdev->dma_channels = dma_channels; | ||
798 | |||
799 | for (i = 0; i < dma_channels; i++) { | ||
800 | if (platform_get_irq(op, i) > 0) | ||
801 | irq_num++; | ||
802 | } | ||
803 | |||
804 | pdev->phy = devm_kzalloc(pdev->dev, | ||
805 | dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); | ||
806 | if (pdev->phy == NULL) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | INIT_LIST_HEAD(&pdev->device.channels); | ||
810 | |||
811 | if (irq_num != dma_channels) { | ||
812 | /* all channels share one IRQ; demux inside the handler */ | ||
813 | irq = platform_get_irq(op, 0); | ||
814 | ret = devm_request_irq(pdev->dev, irq, | ||
815 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); | ||
816 | if (ret) | ||
817 | return ret; | ||
818 | } | ||
819 | |||
820 | for (i = 0; i < dma_channels; i++) { | ||
821 | irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i); | ||
822 | ret = mmp_pdma_chan_init(pdev, i, irq); | ||
823 | if (ret) | ||
824 | return ret; | ||
825 | } | ||
826 | |||
827 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | ||
828 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); | ||
829 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | ||
830 | pdev->device.dev = &op->dev; | ||
831 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; | ||
832 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; | ||
833 | pdev->device.device_tx_status = mmp_pdma_tx_status; | ||
834 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; | ||
835 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; | ||
836 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; | ||
837 | pdev->device.device_control = mmp_pdma_control; | ||
838 | pdev->device.copy_align = PDMA_ALIGNMENT; | ||
839 | |||
840 | if (pdev->dev->coherent_dma_mask) | ||
841 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); | ||
842 | else | ||
843 | dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); | ||
844 | |||
845 | ret = dma_async_device_register(&pdev->device); | ||
846 | if (ret) { | ||
847 | dev_err(pdev->device.dev, "unable to register\n"); | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | dev_info(pdev->device.dev, "initialized\n"); | ||
852 | return 0; | ||
853 | } | ||
854 | |||
855 | static const struct platform_device_id mmp_pdma_id_table[] = { | ||
856 | { "mmp-pdma", }, | ||
857 | { }, | ||
858 | }; | ||
859 | |||
860 | static struct platform_driver mmp_pdma_driver = { | ||
861 | .driver = { | ||
862 | .name = "mmp-pdma", | ||
863 | .owner = THIS_MODULE, | ||
864 | .of_match_table = mmp_pdma_dt_ids, | ||
865 | }, | ||
866 | .id_table = mmp_pdma_id_table, | ||
867 | .probe = mmp_pdma_probe, | ||
868 | .remove = mmp_pdma_remove, | ||
869 | }; | ||
870 | |||
871 | module_platform_driver(mmp_pdma_driver); | ||
872 | |||
873 | MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); | ||
874 | MODULE_AUTHOR("Marvell International Ltd."); | ||
875 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c deleted file mode 100644 index a9f1cd56689..00000000000 --- a/drivers/dma/mmp_tdma.c +++ /dev/null | |||
@@ -1,621 +0,0 @@ | |||
1 | /* | ||
2 | * Driver For Marvell Two-channel DMA Engine | ||
3 | * | ||
4 | * Copyright: Marvell International Ltd. | ||
5 | * | ||
6 | * The code contained herein is licensed under the GNU General Public | ||
7 | * License. You may obtain a copy of the GNU General Public License | ||
8 | * Version 2 or later at the following locations: | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/dmaengine.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <mach/regs-icu.h> | ||
22 | #include <linux/platform_data/dma-mmp_tdma.h> | ||
23 | #include <linux/of_device.h> | ||
24 | |||
25 | #include "dmaengine.h" | ||
26 | |||
27 | /* | ||
28 | * Two-Channel DMA registers | ||
29 | */ | ||
30 | #define TDBCR 0x00 /* Byte Count */ | ||
31 | #define TDSAR 0x10 /* Src Addr */ | ||
32 | #define TDDAR 0x20 /* Dst Addr */ | ||
33 | #define TDNDPR 0x30 /* Next Desc */ | ||
34 | #define TDCR 0x40 /* Control */ | ||
35 | #define TDCP 0x60 /* Priority */ | ||
36 | #define TDCDPR 0x70 /* Current Desc */ | ||
37 | #define TDIMR 0x80 /* Int Mask */ | ||
38 | #define TDISR 0xa0 /* Int Status */ | ||
39 | |||
40 | /* Two-Channel DMA Control Register */ | ||
41 | #define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */ | ||
42 | #define TDCR_SSZ_12_BITS (0x1 << 22) | ||
43 | #define TDCR_SSZ_16_BITS (0x2 << 22) | ||
44 | #define TDCR_SSZ_20_BITS (0x3 << 22) | ||
45 | #define TDCR_SSZ_24_BITS (0x4 << 22) | ||
46 | #define TDCR_SSZ_32_BITS (0x5 << 22) | ||
47 | #define TDCR_SSZ_SHIFT (0x1 << 22) | ||
48 | #define TDCR_SSZ_MASK (0x7 << 22) | ||
49 | #define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */ | ||
50 | #define TDCR_ABR (0x1 << 20) /* Channel Abort */ | ||
51 | #define TDCR_CDE (0x1 << 17) /* Close Desc Enable */ | ||
52 | #define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */ | ||
53 | #define TDCR_CHANACT (0x1 << 14) /* Channel Active */ | ||
54 | #define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */ | ||
55 | #define TDCR_CHANEN (0x1 << 12) /* Channel Enable */ | ||
56 | #define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */ | ||
57 | #define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */ | ||
58 | #define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */ | ||
59 | #define TDCR_BURSTSZ_4B (0x0 << 6) | ||
60 | #define TDCR_BURSTSZ_8B (0x1 << 6) | ||
61 | #define TDCR_BURSTSZ_16B (0x3 << 6) | ||
62 | #define TDCR_BURSTSZ_32B (0x6 << 6) | ||
63 | #define TDCR_BURSTSZ_64B (0x7 << 6) | ||
64 | #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) | ||
65 | #define TDCR_BURSTSZ_128B (0x5 << 6) | ||
66 | #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ | ||
67 | #define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */ | ||
68 | #define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */ | ||
69 | #define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */ | ||
70 | #define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */ | ||
71 | #define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */ | ||
72 | #define TDCR_DSTDESCCONT (0x1 << 1) | ||
73 | #define TDCR_SRCDESTCONT (0x1 << 0) | ||
74 | |||
75 | /* Two-Channel DMA Int Mask Register */ | ||
76 | #define TDIMR_COMP (0x1 << 0) | ||
77 | |||
78 | /* Two-Channel DMA Int Status Register */ | ||
79 | #define TDISR_COMP (0x1 << 0) | ||
80 | |||
81 | /* | ||
82 | * Two-Channel DMA Descriptor Struct | ||
83 | * NOTE: desc's buf must be aligned to 16 bytes. | ||
84 | */ | ||
85 | struct mmp_tdma_desc { | ||
86 | u32 byte_cnt; | ||
87 | u32 src_addr; | ||
88 | u32 dst_addr; | ||
89 | u32 nxt_desc; | ||
90 | }; | ||
91 | |||
92 | enum mmp_tdma_type { | ||
93 | MMP_AUD_TDMA = 0, | ||
94 | PXA910_SQU, | ||
95 | }; | ||
96 | |||
97 | #define TDMA_ALIGNMENT 3 | ||
98 | #define TDMA_MAX_XFER_BYTES SZ_64K | ||
99 | |||
100 | struct mmp_tdma_chan { | ||
101 | struct device *dev; | ||
102 | struct dma_chan chan; | ||
103 | struct dma_async_tx_descriptor desc; | ||
104 | struct tasklet_struct tasklet; | ||
105 | |||
106 | struct mmp_tdma_desc *desc_arr; | ||
107 | phys_addr_t desc_arr_phys; | ||
108 | int desc_num; | ||
109 | enum dma_transfer_direction dir; | ||
110 | dma_addr_t dev_addr; | ||
111 | u32 burst_sz; | ||
112 | enum dma_slave_buswidth buswidth; | ||
113 | enum dma_status status; | ||
114 | |||
115 | int idx; | ||
116 | enum mmp_tdma_type type; | ||
117 | int irq; | ||
118 | unsigned long reg_base; | ||
119 | |||
120 | size_t buf_len; | ||
121 | size_t period_len; | ||
122 | size_t pos; | ||
123 | }; | ||
124 | |||
125 | #define TDMA_CHANNEL_NUM 2 | ||
126 | struct mmp_tdma_device { | ||
127 | struct device *dev; | ||
128 | void __iomem *base; | ||
129 | struct dma_device device; | ||
130 | struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; | ||
131 | }; | ||
132 | |||
133 | #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) | ||
134 | |||
135 | static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys) | ||
136 | { | ||
137 | writel(phys, tdmac->reg_base + TDNDPR); | ||
138 | writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND, | ||
139 | tdmac->reg_base + TDCR); | ||
140 | } | ||
141 | |||
142 | static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) | ||
143 | { | ||
144 | /* enable irq */ | ||
145 | writel(TDIMR_COMP, tdmac->reg_base + TDIMR); | ||
146 | /* enable dma chan */ | ||
147 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, | ||
148 | tdmac->reg_base + TDCR); | ||
149 | tdmac->status = DMA_IN_PROGRESS; | ||
150 | } | ||
151 | |||
152 | static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | ||
153 | { | ||
154 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | ||
155 | tdmac->reg_base + TDCR); | ||
156 | tdmac->status = DMA_SUCCESS; | ||
157 | } | ||
158 | |||
159 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) | ||
160 | { | ||
161 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, | ||
162 | tdmac->reg_base + TDCR); | ||
163 | tdmac->status = DMA_IN_PROGRESS; | ||
164 | } | ||
165 | |||
166 | static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) | ||
167 | { | ||
168 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | ||
169 | tdmac->reg_base + TDCR); | ||
170 | tdmac->status = DMA_PAUSED; | ||
171 | } | ||
172 | |||
173 | static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) | ||
174 | { | ||
175 | unsigned int tdcr; | ||
176 | |||
177 | mmp_tdma_disable_chan(tdmac); | ||
178 | |||
179 | if (tdmac->dir == DMA_MEM_TO_DEV) | ||
180 | tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; | ||
181 | else if (tdmac->dir == DMA_DEV_TO_MEM) | ||
182 | tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC; | ||
183 | |||
184 | if (tdmac->type == MMP_AUD_TDMA) { | ||
185 | tdcr |= TDCR_PACKMOD; | ||
186 | |||
187 | switch (tdmac->burst_sz) { | ||
188 | case 4: | ||
189 | tdcr |= TDCR_BURSTSZ_4B; | ||
190 | break; | ||
191 | case 8: | ||
192 | tdcr |= TDCR_BURSTSZ_8B; | ||
193 | break; | ||
194 | case 16: | ||
195 | tdcr |= TDCR_BURSTSZ_16B; | ||
196 | break; | ||
197 | case 32: | ||
198 | tdcr |= TDCR_BURSTSZ_32B; | ||
199 | break; | ||
200 | case 64: | ||
201 | tdcr |= TDCR_BURSTSZ_64B; | ||
202 | break; | ||
203 | case 128: | ||
204 | tdcr |= TDCR_BURSTSZ_128B; | ||
205 | break; | ||
206 | default: | ||
207 | dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | switch (tdmac->buswidth) { | ||
212 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
213 | tdcr |= TDCR_SSZ_8_BITS; | ||
214 | break; | ||
215 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
216 | tdcr |= TDCR_SSZ_16_BITS; | ||
217 | break; | ||
218 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
219 | tdcr |= TDCR_SSZ_32_BITS; | ||
220 | break; | ||
221 | default: | ||
222 | dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n"); | ||
223 | return -EINVAL; | ||
224 | } | ||
225 | } else if (tdmac->type == PXA910_SQU) { | ||
226 | tdcr |= TDCR_BURSTSZ_SQU_32B; | ||
227 | tdcr |= TDCR_SSPMOD; | ||
228 | } | ||
229 | |||
230 | writel(tdcr, tdmac->reg_base + TDCR); | ||
231 | return 0; | ||
232 | } | ||
233 | |||
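To make the bit layout concrete, an audio (MMP_AUD_TDMA) channel configured for memory-to-device transfers with a 32-byte burst and 16-bit samples would, per the switch statements above, program a TDCR value equivalent to this sketch:

    /* Illustrative TDCR composition derived from the cases above. */
    unsigned int tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC |
                        TDCR_PACKMOD | TDCR_BURSTSZ_32B | TDCR_SSZ_16_BITS;

    writel(tdcr, tdmac->reg_base + TDCR);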
234 | static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac) | ||
235 | { | ||
236 | u32 reg = readl(tdmac->reg_base + TDISR); | ||
237 | |||
238 | if (reg & TDISR_COMP) { | ||
239 | /* clear irq */ | ||
240 | reg &= ~TDISR_COMP; | ||
241 | writel(reg, tdmac->reg_base + TDISR); | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | return -EAGAIN; | ||
246 | } | ||
247 | |||
248 | static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) | ||
249 | { | ||
250 | struct mmp_tdma_chan *tdmac = dev_id; | ||
251 | |||
252 | if (mmp_tdma_clear_chan_irq(tdmac) == 0) { | ||
253 | tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len; | ||
254 | tasklet_schedule(&tdmac->tasklet); | ||
255 | return IRQ_HANDLED; | ||
256 | } else | ||
257 | return IRQ_NONE; | ||
258 | } | ||
259 | |||
260 | static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id) | ||
261 | { | ||
262 | struct mmp_tdma_device *tdev = dev_id; | ||
263 | int i, ret; | ||
264 | int irq_num = 0; | ||
265 | |||
266 | for (i = 0; i < TDMA_CHANNEL_NUM; i++) { | ||
267 | struct mmp_tdma_chan *tdmac = tdev->tdmac[i]; | ||
268 | |||
269 | ret = mmp_tdma_chan_handler(irq, tdmac); | ||
270 | if (ret == IRQ_HANDLED) | ||
271 | irq_num++; | ||
272 | } | ||
273 | |||
274 | if (irq_num) | ||
275 | return IRQ_HANDLED; | ||
276 | else | ||
277 | return IRQ_NONE; | ||
278 | } | ||
279 | |||
280 | static void dma_do_tasklet(unsigned long data) | ||
281 | { | ||
282 | struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data; | ||
283 | |||
284 | if (tdmac->desc.callback) | ||
285 | tdmac->desc.callback(tdmac->desc.callback_param); | ||
286 | |||
287 | } | ||
288 | |||
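The tasklet above only forwards completion to the client's callback. In the cyclic audio use case this controller targets, that callback would typically resemble the hypothetical handler below; the ALSA coupling is an assumption for illustration and is not defined anywhere in this file.

    #include <sound/pcm.h>

    /* Hypothetical period-completion callback installed by an audio client. */
    static void my_tdma_period_done(void *arg)
    {
            struct snd_pcm_substream *substream = arg;

            /* One period finished; let ALSA advance its ring buffer. */
            snd_pcm_period_elapsed(substream);
    }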
289 | static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) | ||
290 | { | ||
291 | struct gen_pool *gpool; | ||
292 | int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); | ||
293 | |||
294 | gpool = sram_get_gpool("asram"); | ||
295 | if (tdmac->desc_arr) | ||
296 | gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, | ||
297 | size); | ||
298 | tdmac->desc_arr = NULL; | ||
299 | |||
300 | return; | ||
301 | } | ||
302 | |||
303 | static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
304 | { | ||
305 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan); | ||
306 | |||
307 | mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) | ||
313 | { | ||
314 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
315 | int ret; | ||
316 | |||
317 | dma_async_tx_descriptor_init(&tdmac->desc, chan); | ||
318 | tdmac->desc.tx_submit = mmp_tdma_tx_submit; | ||
319 | |||
320 | if (tdmac->irq) { | ||
321 | ret = devm_request_irq(tdmac->dev, tdmac->irq, | ||
322 | mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); | ||
323 | if (ret) | ||
324 | return ret; | ||
325 | } | ||
326 | return 1; | ||
327 | } | ||
328 | |||
329 | static void mmp_tdma_free_chan_resources(struct dma_chan *chan) | ||
330 | { | ||
331 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
332 | |||
333 | if (tdmac->irq) | ||
334 | devm_free_irq(tdmac->dev, tdmac->irq, tdmac); | ||
335 | mmp_tdma_free_descriptor(tdmac); | ||
336 | return; | ||
337 | } | ||
338 | |||
339 | struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) | ||
340 | { | ||
341 | struct gen_pool *gpool; | ||
342 | int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); | ||
343 | |||
344 | gpool = sram_get_gpool("asram"); | ||
345 | if (!gpool) | ||
346 | return NULL; | ||
347 | |||
348 | tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size); | ||
349 | if (!tdmac->desc_arr) | ||
350 | return NULL; | ||
351 | |||
352 | tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool, | ||
353 | (unsigned long)tdmac->desc_arr); | ||
354 | |||
355 | return tdmac->desc_arr; | ||
356 | } | ||
357 | |||
358 | static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( | ||
359 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
360 | size_t period_len, enum dma_transfer_direction direction, | ||
361 | unsigned long flags, void *context) | ||
362 | { | ||
363 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
364 | struct mmp_tdma_desc *desc; | ||
365 | int num_periods = buf_len / period_len; | ||
366 | int i = 0, buf = 0; | ||
367 | |||
368 | if (tdmac->status != DMA_SUCCESS) | ||
369 | return NULL; | ||
370 | |||
371 | if (period_len > TDMA_MAX_XFER_BYTES) { | ||
372 | dev_err(tdmac->dev, | ||
373 | "maximum period size exceeded: %d > %d\n", | ||
374 | period_len, TDMA_MAX_XFER_BYTES); | ||
375 | goto err_out; | ||
376 | } | ||
377 | |||
378 | tdmac->status = DMA_IN_PROGRESS; | ||
379 | tdmac->desc_num = num_periods; | ||
380 | desc = mmp_tdma_alloc_descriptor(tdmac); | ||
381 | if (!desc) | ||
382 | goto err_out; | ||
383 | |||
384 | while (buf < buf_len) { | ||
385 | desc = &tdmac->desc_arr[i]; | ||
386 | |||
387 | if (i + 1 == num_periods) | ||
388 | desc->nxt_desc = tdmac->desc_arr_phys; | ||
389 | else | ||
390 | desc->nxt_desc = tdmac->desc_arr_phys + | ||
391 | sizeof(*desc) * (i + 1); | ||
392 | |||
393 | if (direction == DMA_MEM_TO_DEV) { | ||
394 | desc->src_addr = dma_addr; | ||
395 | desc->dst_addr = tdmac->dev_addr; | ||
396 | } else { | ||
397 | desc->src_addr = tdmac->dev_addr; | ||
398 | desc->dst_addr = dma_addr; | ||
399 | } | ||
400 | desc->byte_cnt = period_len; | ||
401 | dma_addr += period_len; | ||
402 | buf += period_len; | ||
403 | i++; | ||
404 | } | ||
405 | |||
406 | tdmac->buf_len = buf_len; | ||
407 | tdmac->period_len = period_len; | ||
408 | tdmac->pos = 0; | ||
409 | |||
410 | return &tdmac->desc; | ||
411 | |||
412 | err_out: | ||
413 | tdmac->status = DMA_ERROR; | ||
414 | return NULL; | ||
415 | } | ||
416 | |||
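A client obtains a cyclic descriptor from the routine above through the device_prep_dma_cyclic hook it is wired to later in probe. The sketch below is hypothetical: buf_dma, buf_len, period_len and substream are placeholders, and it reuses the my_tdma_period_done() callback sketched earlier.

    /* Sketch: set up a device-to-memory ring and start it. */
    struct dma_async_tx_descriptor *tx;

    tx = chan->device->device_prep_dma_cyclic(chan, buf_dma, buf_len,
                                              period_len, DMA_DEV_TO_MEM,
                                              DMA_PREP_INTERRUPT, NULL);
    if (!tx)
            return -EINVAL;

    tx->callback = my_tdma_period_done;     /* runs once per completed period */
    tx->callback_param = substream;
    dmaengine_submit(tx);                   /* mmp_tdma_tx_submit() loads the chain */
    dma_async_issue_pending(chan);          /* mmp_tdma_issue_pending() enables the channel */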
417 | static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
418 | unsigned long arg) | ||
419 | { | ||
420 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
421 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
422 | int ret = 0; | ||
423 | |||
424 | switch (cmd) { | ||
425 | case DMA_TERMINATE_ALL: | ||
426 | mmp_tdma_disable_chan(tdmac); | ||
427 | break; | ||
428 | case DMA_PAUSE: | ||
429 | mmp_tdma_pause_chan(tdmac); | ||
430 | break; | ||
431 | case DMA_RESUME: | ||
432 | mmp_tdma_resume_chan(tdmac); | ||
433 | break; | ||
434 | case DMA_SLAVE_CONFIG: | ||
435 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | ||
436 | tdmac->dev_addr = dmaengine_cfg->src_addr; | ||
437 | tdmac->burst_sz = dmaengine_cfg->src_maxburst; | ||
438 | tdmac->buswidth = dmaengine_cfg->src_addr_width; | ||
439 | } else { | ||
440 | tdmac->dev_addr = dmaengine_cfg->dst_addr; | ||
441 | tdmac->burst_sz = dmaengine_cfg->dst_maxburst; | ||
442 | tdmac->buswidth = dmaengine_cfg->dst_addr_width; | ||
443 | } | ||
444 | tdmac->dir = dmaengine_cfg->direction; | ||
445 | return mmp_tdma_config_chan(tdmac); | ||
446 | default: | ||
447 | ret = -ENOSYS; | ||
448 | } | ||
449 | |||
450 | return ret; | ||
451 | } | ||
452 | |||
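The DMA_SLAVE_CONFIG case above is normally reached through dmaengine_slave_config(); a hedged client-side sketch follows, in which the FIFO address, bus width and burst length are purely illustrative.

    /* Sketch: describe the peripheral end of the transfer to the channel. */
    struct dma_slave_config cfg = {
            .direction      = DMA_DEV_TO_MEM,
            .src_addr       = fifo_phys,    /* hypothetical device FIFO address */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = 32,
    };
    int ret;

    ret = dmaengine_slave_config(chan, &cfg);       /* lands in DMA_SLAVE_CONFIG above */
    if (ret)
            return ret;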
453 | static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, | ||
454 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
455 | { | ||
456 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
457 | |||
458 | dma_set_residue(txstate, tdmac->buf_len - tdmac->pos); | ||
459 | |||
460 | return tdmac->status; | ||
461 | } | ||
462 | |||
463 | static void mmp_tdma_issue_pending(struct dma_chan *chan) | ||
464 | { | ||
465 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
466 | |||
467 | mmp_tdma_enable_chan(tdmac); | ||
468 | } | ||
469 | |||
470 | static int mmp_tdma_remove(struct platform_device *pdev) | ||
471 | { | ||
472 | struct mmp_tdma_device *tdev = platform_get_drvdata(pdev); | ||
473 | |||
474 | dma_async_device_unregister(&tdev->device); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | ||
479 | int idx, int irq, int type) | ||
480 | { | ||
481 | struct mmp_tdma_chan *tdmac; | ||
482 | |||
483 | if (idx >= TDMA_CHANNEL_NUM) { | ||
484 | dev_err(tdev->dev, "too many channels for device!\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | /* alloc channel */ | ||
489 | tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); | ||
490 | if (!tdmac) { | ||
491 | dev_err(tdev->dev, "no free memory for DMA channels!\n"); | ||
492 | return -ENOMEM; | ||
493 | } | ||
494 | if (irq) | ||
495 | tdmac->irq = irq; | ||
496 | tdmac->dev = tdev->dev; | ||
497 | tdmac->chan.device = &tdev->device; | ||
498 | tdmac->idx = idx; | ||
499 | tdmac->type = type; | ||
500 | tdmac->reg_base = (unsigned long)tdev->base + idx * 4; | ||
501 | tdmac->status = DMA_SUCCESS; | ||
502 | tdev->tdmac[tdmac->idx] = tdmac; | ||
503 | tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); | ||
504 | |||
505 | /* add the channel to tdma_chan list */ | ||
506 | list_add_tail(&tdmac->chan.device_node, | ||
507 | &tdev->device.channels); | ||
508 | return 0; | ||
509 | } | ||
510 | |||
511 | static struct of_device_id mmp_tdma_dt_ids[] = { | ||
512 | { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, | ||
513 | { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, | ||
514 | {} | ||
515 | }; | ||
516 | MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids); | ||
517 | |||
518 | static int mmp_tdma_probe(struct platform_device *pdev) | ||
519 | { | ||
520 | enum mmp_tdma_type type; | ||
521 | const struct of_device_id *of_id; | ||
522 | struct mmp_tdma_device *tdev; | ||
523 | struct resource *iores; | ||
524 | int i, ret; | ||
525 | int irq = 0, irq_num = 0; | ||
526 | int chan_num = TDMA_CHANNEL_NUM; | ||
527 | |||
528 | of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); | ||
529 | if (of_id) | ||
530 | type = (enum mmp_tdma_type) of_id->data; | ||
531 | else | ||
532 | type = platform_get_device_id(pdev)->driver_data; | ||
533 | |||
534 | /* the controller always has two channels */ | ||
535 | tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); | ||
536 | if (!tdev) | ||
537 | return -ENOMEM; | ||
538 | |||
539 | tdev->dev = &pdev->dev; | ||
540 | |||
541 | for (i = 0; i < chan_num; i++) { | ||
542 | if (platform_get_irq(pdev, i) > 0) | ||
543 | irq_num++; | ||
544 | } | ||
545 | |||
546 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
547 | if (!iores) | ||
548 | return -EINVAL; | ||
549 | |||
550 | tdev->base = devm_request_and_ioremap(&pdev->dev, iores); | ||
551 | if (!tdev->base) | ||
552 | return -EADDRNOTAVAIL; | ||
553 | |||
554 | INIT_LIST_HEAD(&tdev->device.channels); | ||
555 | |||
556 | if (irq_num != chan_num) { | ||
557 | irq = platform_get_irq(pdev, 0); | ||
558 | ret = devm_request_irq(&pdev->dev, irq, | ||
559 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); | ||
560 | if (ret) | ||
561 | return ret; | ||
562 | } | ||
563 | |||
564 | /* initialize channel parameters */ | ||
565 | for (i = 0; i < chan_num; i++) { | ||
566 | irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); | ||
567 | ret = mmp_tdma_chan_init(tdev, i, irq, type); | ||
568 | if (ret) | ||
569 | return ret; | ||
570 | } | ||
571 | |||
572 | dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); | ||
573 | dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); | ||
574 | tdev->device.dev = &pdev->dev; | ||
575 | tdev->device.device_alloc_chan_resources = | ||
576 | mmp_tdma_alloc_chan_resources; | ||
577 | tdev->device.device_free_chan_resources = | ||
578 | mmp_tdma_free_chan_resources; | ||
579 | tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; | ||
580 | tdev->device.device_tx_status = mmp_tdma_tx_status; | ||
581 | tdev->device.device_issue_pending = mmp_tdma_issue_pending; | ||
582 | tdev->device.device_control = mmp_tdma_control; | ||
583 | tdev->device.copy_align = TDMA_ALIGNMENT; | ||
584 | |||
585 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
586 | platform_set_drvdata(pdev, tdev); | ||
587 | |||
588 | ret = dma_async_device_register(&tdev->device); | ||
589 | if (ret) { | ||
590 | dev_err(tdev->device.dev, "unable to register\n"); | ||
591 | return ret; | ||
592 | } | ||
593 | |||
594 | dev_info(tdev->device.dev, "initialized\n"); | ||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static const struct platform_device_id mmp_tdma_id_table[] = { | ||
599 | { "mmp-adma", MMP_AUD_TDMA }, | ||
600 | { "pxa910-squ", PXA910_SQU }, | ||
601 | { }, | ||
602 | }; | ||
603 | |||
604 | static struct platform_driver mmp_tdma_driver = { | ||
605 | .driver = { | ||
606 | .name = "mmp-tdma", | ||
607 | .owner = THIS_MODULE, | ||
608 | .of_match_table = mmp_tdma_dt_ids, | ||
609 | }, | ||
610 | .id_table = mmp_tdma_id_table, | ||
611 | .probe = mmp_tdma_probe, | ||
612 | .remove = mmp_tdma_remove, | ||
613 | }; | ||
614 | |||
615 | module_platform_driver(mmp_tdma_driver); | ||
616 | |||
617 | MODULE_LICENSE("GPL"); | ||
618 | MODULE_DESCRIPTION("MMP Two-Channel DMA Driver"); | ||
619 | MODULE_ALIAS("platform:mmp-tdma"); | ||
620 | MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); | ||
621 | MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>"); | ||
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 2d956732aa3..b9bae94f201 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -44,8 +44,6 @@ | |||
44 | 44 | ||
45 | #include <linux/random.h> | 45 | #include <linux/random.h> |
46 | 46 | ||
47 | #include "dmaengine.h" | ||
48 | |||
49 | /* Number of DMA Transfer descriptors allocated per channel */ | 47 | /* Number of DMA Transfer descriptors allocated per channel */ |
50 | #define MPC_DMA_DESCRIPTORS 64 | 48 | #define MPC_DMA_DESCRIPTORS 64 |
51 | 49 | ||
@@ -190,6 +188,7 @@ struct mpc_dma_chan { | |||
190 | struct list_head completed; | 188 | struct list_head completed; |
191 | struct mpc_dma_tcd *tcd; | 189 | struct mpc_dma_tcd *tcd; |
192 | dma_addr_t tcd_paddr; | 190 | dma_addr_t tcd_paddr; |
191 | dma_cookie_t completed_cookie; | ||
193 | 192 | ||
194 | /* Lock for this structure */ | 193 | /* Lock for this structure */ |
195 | spinlock_t lock; | 194 | spinlock_t lock; |
@@ -366,7 +365,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma) | |||
366 | /* Free descriptors */ | 365 | /* Free descriptors */ |
367 | spin_lock_irqsave(&mchan->lock, flags); | 366 | spin_lock_irqsave(&mchan->lock, flags); |
368 | list_splice_tail_init(&list, &mchan->free); | 367 | list_splice_tail_init(&list, &mchan->free); |
369 | mchan->chan.completed_cookie = last_cookie; | 368 | mchan->completed_cookie = last_cookie; |
370 | spin_unlock_irqrestore(&mchan->lock, flags); | 369 | spin_unlock_irqrestore(&mchan->lock, flags); |
371 | } | 370 | } |
372 | } | 371 | } |
@@ -439,7 +438,13 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | |||
439 | mpc_dma_execute(mchan); | 438 | mpc_dma_execute(mchan); |
440 | 439 | ||
441 | /* Update cookie */ | 440 | /* Update cookie */ |
442 | cookie = dma_cookie_assign(txd); | 441 | cookie = mchan->chan.cookie + 1; |
442 | if (cookie <= 0) | ||
443 | cookie = 1; | ||
444 | |||
445 | mchan->chan.cookie = cookie; | ||
446 | mdesc->desc.cookie = cookie; | ||
447 | |||
443 | spin_unlock_irqrestore(&mchan->lock, flags); | 448 | spin_unlock_irqrestore(&mchan->lock, flags); |
444 | 449 | ||
445 | return cookie; | 450 | return cookie; |
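For reference, the dma_cookie_assign() helper that this hunk open-codes is roughly equivalent to the sketch below; treat it as an approximation, not a verbatim copy of drivers/dma/dmaengine.h.

    /* Approximate shape of the generic cookie-assignment helper. */
    static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
    {
            struct dma_chan *chan = tx->chan;
            dma_cookie_t cookie = chan->cookie + 1;

            if (cookie < DMA_MIN_COOKIE)    /* cookies must stay positive */
                    cookie = DMA_MIN_COOKIE;
            tx->cookie = chan->cookie = cookie;

            return cookie;
    }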
@@ -557,14 +562,17 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
557 | struct dma_tx_state *txstate) | 562 | struct dma_tx_state *txstate) |
558 | { | 563 | { |
559 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 564 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
560 | enum dma_status ret; | ||
561 | unsigned long flags; | 565 | unsigned long flags; |
566 | dma_cookie_t last_used; | ||
567 | dma_cookie_t last_complete; | ||
562 | 568 | ||
563 | spin_lock_irqsave(&mchan->lock, flags); | 569 | spin_lock_irqsave(&mchan->lock, flags); |
564 | ret = dma_cookie_status(chan, cookie, txstate); | 570 | last_used = mchan->chan.cookie; |
571 | last_complete = mchan->completed_cookie; | ||
565 | spin_unlock_irqrestore(&mchan->lock, flags); | 572 | spin_unlock_irqrestore(&mchan->lock, flags); |
566 | 573 | ||
567 | return ret; | 574 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
575 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
568 | } | 576 | } |
569 | 577 | ||
570 | /* Prepare descriptor for memory to memory copy */ | 578 | /* Prepare descriptor for memory to memory copy */ |
@@ -641,7 +649,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
641 | return &mdesc->desc; | 649 | return &mdesc->desc; |
642 | } | 650 | } |
643 | 651 | ||
644 | static int mpc_dma_probe(struct platform_device *op) | 652 | static int __devinit mpc_dma_probe(struct platform_device *op) |
645 | { | 653 | { |
646 | struct device_node *dn = op->dev.of_node; | 654 | struct device_node *dn = op->dev.of_node; |
647 | struct device *dev = &op->dev; | 655 | struct device *dev = &op->dev; |
@@ -733,7 +741,9 @@ static int mpc_dma_probe(struct platform_device *op) | |||
733 | mchan = &mdma->channels[i]; | 741 | mchan = &mdma->channels[i]; |
734 | 742 | ||
735 | mchan->chan.device = dma; | 743 | mchan->chan.device = dma; |
736 | dma_cookie_init(&mchan->chan); | 744 | mchan->chan.chan_id = i; |
745 | mchan->chan.cookie = 1; | ||
746 | mchan->completed_cookie = mchan->chan.cookie; | ||
737 | 747 | ||
738 | INIT_LIST_HEAD(&mchan->free); | 748 | INIT_LIST_HEAD(&mchan->free); |
739 | INIT_LIST_HEAD(&mchan->prepared); | 749 | INIT_LIST_HEAD(&mchan->prepared); |
@@ -799,7 +809,7 @@ static int mpc_dma_probe(struct platform_device *op) | |||
799 | return retval; | 809 | return retval; |
800 | } | 810 | } |
801 | 811 | ||
802 | static int mpc_dma_remove(struct platform_device *op) | 812 | static int __devexit mpc_dma_remove(struct platform_device *op) |
803 | { | 813 | { |
804 | struct device *dev = &op->dev; | 814 | struct device *dev = &op->dev; |
805 | struct mpc_dma *mdma = dev_get_drvdata(dev); | 815 | struct mpc_dma *mdma = dev_get_drvdata(dev); |
@@ -818,7 +828,7 @@ static struct of_device_id mpc_dma_match[] = { | |||
818 | 828 | ||
819 | static struct platform_driver mpc_dma_driver = { | 829 | static struct platform_driver mpc_dma_driver = { |
820 | .probe = mpc_dma_probe, | 830 | .probe = mpc_dma_probe, |
821 | .remove = mpc_dma_remove, | 831 | .remove = __devexit_p(mpc_dma_remove), |
822 | .driver = { | 832 | .driver = { |
823 | .name = DRV_NAME, | 833 | .name = DRV_NAME, |
824 | .owner = THIS_MODULE, | 834 | .owner = THIS_MODULE, |
@@ -826,7 +836,17 @@ static struct platform_driver mpc_dma_driver = { | |||
826 | }, | 836 | }, |
827 | }; | 837 | }; |
828 | 838 | ||
829 | module_platform_driver(mpc_dma_driver); | 839 | static int __init mpc_dma_init(void) |
840 | { | ||
841 | return platform_driver_register(&mpc_dma_driver); | ||
842 | } | ||
843 | module_init(mpc_dma_init); | ||
844 | |||
845 | static void __exit mpc_dma_exit(void) | ||
846 | { | ||
847 | platform_driver_unregister(&mpc_dma_driver); | ||
848 | } | ||
849 | module_exit(mpc_dma_exit); | ||
830 | 850 | ||
831 | MODULE_LICENSE("GPL"); | 851 | MODULE_LICENSE("GPL"); |
832 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); | 852 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index e17fad03cb8..9a353c2216d 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -25,26 +25,20 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/memory.h> | 27 | #include <linux/memory.h> |
28 | #include <linux/clk.h> | 28 | #include <plat/mv_xor.h> |
29 | #include <linux/of.h> | ||
30 | #include <linux/of_irq.h> | ||
31 | #include <linux/irqdomain.h> | ||
32 | #include <linux/platform_data/dma-mv_xor.h> | ||
33 | |||
34 | #include "dmaengine.h" | ||
35 | #include "mv_xor.h" | 29 | #include "mv_xor.h" |
36 | 30 | ||
37 | static void mv_xor_issue_pending(struct dma_chan *chan); | 31 | static void mv_xor_issue_pending(struct dma_chan *chan); |
38 | 32 | ||
39 | #define to_mv_xor_chan(chan) \ | 33 | #define to_mv_xor_chan(chan) \ |
40 | container_of(chan, struct mv_xor_chan, dmachan) | 34 | container_of(chan, struct mv_xor_chan, common) |
35 | |||
36 | #define to_mv_xor_device(dev) \ | ||
37 | container_of(dev, struct mv_xor_device, common) | ||
41 | 38 | ||
42 | #define to_mv_xor_slot(tx) \ | 39 | #define to_mv_xor_slot(tx) \ |
43 | container_of(tx, struct mv_xor_desc_slot, async_tx) | 40 | container_of(tx, struct mv_xor_desc_slot, async_tx) |
44 | 41 | ||
45 | #define mv_chan_to_devp(chan) \ | ||
46 | ((chan)->dmadev.dev) | ||
47 | |||
48 | static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | 42 | static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) |
49 | { | 43 | { |
50 | struct mv_xor_desc *hw_desc = desc->hw_desc; | 44 | struct mv_xor_desc *hw_desc = desc->hw_desc; |
@@ -169,7 +163,7 @@ static int mv_is_err_intr(u32 intr_cause) | |||
169 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) | 163 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) |
170 | { | 164 | { |
171 | u32 val = ~(1 << (chan->idx * 16)); | 165 | u32 val = ~(1 << (chan->idx * 16)); |
172 | dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); | 166 | dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); |
173 | __raw_writel(val, XOR_INTR_CAUSE(chan)); | 167 | __raw_writel(val, XOR_INTR_CAUSE(chan)); |
174 | } | 168 | } |
175 | 169 | ||
@@ -209,9 +203,9 @@ static void mv_set_mode(struct mv_xor_chan *chan, | |||
209 | op_mode = XOR_OPERATION_MODE_MEMSET; | 203 | op_mode = XOR_OPERATION_MODE_MEMSET; |
210 | break; | 204 | break; |
211 | default: | 205 | default: |
212 | dev_err(mv_chan_to_devp(chan), | 206 | dev_printk(KERN_ERR, chan->device->common.dev, |
213 | "error: unsupported operation %d.\n", | 207 | "error: unsupported operation %d.\n", |
214 | type); | 208 | type); |
215 | BUG(); | 209 | BUG(); |
216 | return; | 210 | return; |
217 | } | 211 | } |
@@ -226,7 +220,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan) | |||
226 | { | 220 | { |
227 | u32 activation; | 221 | u32 activation; |
228 | 222 | ||
229 | dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); | 223 | dev_dbg(chan->device->common.dev, " activate chan.\n"); |
230 | activation = __raw_readl(XOR_ACTIVATION(chan)); | 224 | activation = __raw_readl(XOR_ACTIVATION(chan)); |
231 | activation |= 0x1; | 225 | activation |= 0x1; |
232 | __raw_writel(activation, XOR_ACTIVATION(chan)); | 226 | __raw_writel(activation, XOR_ACTIVATION(chan)); |
@@ -254,7 +248,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt) | |||
254 | static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, | 248 | static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, |
255 | struct mv_xor_desc_slot *slot) | 249 | struct mv_xor_desc_slot *slot) |
256 | { | 250 | { |
257 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", | 251 | dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", |
258 | __func__, __LINE__, slot); | 252 | __func__, __LINE__, slot); |
259 | 253 | ||
260 | slot->slots_per_op = 0; | 254 | slot->slots_per_op = 0; |
@@ -269,7 +263,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, | |||
269 | static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | 263 | static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, |
270 | struct mv_xor_desc_slot *sw_desc) | 264 | struct mv_xor_desc_slot *sw_desc) |
271 | { | 265 | { |
272 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", | 266 | dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", |
273 | __func__, __LINE__, sw_desc); | 267 | __func__, __LINE__, sw_desc); |
274 | if (sw_desc->type != mv_chan->current_type) | 268 | if (sw_desc->type != mv_chan->current_type) |
275 | mv_set_mode(mv_chan, sw_desc->type); | 269 | mv_set_mode(mv_chan, sw_desc->type); |
@@ -287,7 +281,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | |||
287 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); | 281 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); |
288 | } | 282 | } |
289 | mv_chan->pending += sw_desc->slot_cnt; | 283 | mv_chan->pending += sw_desc->slot_cnt; |
290 | mv_xor_issue_pending(&mv_chan->dmachan); | 284 | mv_xor_issue_pending(&mv_chan->common); |
291 | } | 285 | } |
292 | 286 | ||
293 | static dma_cookie_t | 287 | static dma_cookie_t |
@@ -311,7 +305,8 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
311 | */ | 305 | */ |
312 | if (desc->group_head && desc->unmap_len) { | 306 | if (desc->group_head && desc->unmap_len) { |
313 | struct mv_xor_desc_slot *unmap = desc->group_head; | 307 | struct mv_xor_desc_slot *unmap = desc->group_head; |
314 | struct device *dev = mv_chan_to_devp(mv_chan); | 308 | struct device *dev = |
309 | &mv_chan->device->pdev->dev; | ||
315 | u32 len = unmap->unmap_len; | 310 | u32 len = unmap->unmap_len; |
316 | enum dma_ctrl_flags flags = desc->async_tx.flags; | 311 | enum dma_ctrl_flags flags = desc->async_tx.flags; |
317 | u32 src_cnt; | 312 | u32 src_cnt; |
@@ -355,7 +350,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) | |||
355 | { | 350 | { |
356 | struct mv_xor_desc_slot *iter, *_iter; | 351 | struct mv_xor_desc_slot *iter, *_iter; |
357 | 352 | ||
358 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); | 353 | dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); |
359 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, | 354 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, |
360 | completed_node) { | 355 | completed_node) { |
361 | 356 | ||
@@ -371,7 +366,7 @@ static int | |||
371 | mv_xor_clean_slot(struct mv_xor_desc_slot *desc, | 366 | mv_xor_clean_slot(struct mv_xor_desc_slot *desc, |
372 | struct mv_xor_chan *mv_chan) | 367 | struct mv_xor_chan *mv_chan) |
373 | { | 368 | { |
374 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n", | 369 | dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", |
375 | __func__, __LINE__, desc, desc->async_tx.flags); | 370 | __func__, __LINE__, desc, desc->async_tx.flags); |
376 | list_del(&desc->chain_node); | 371 | list_del(&desc->chain_node); |
377 | /* the client is allowed to attach dependent operations | 372 | /* the client is allowed to attach dependent operations |
@@ -395,8 +390,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |||
395 | u32 current_desc = mv_chan_get_current_desc(mv_chan); | 390 | u32 current_desc = mv_chan_get_current_desc(mv_chan); |
396 | int seen_current = 0; | 391 | int seen_current = 0; |
397 | 392 | ||
398 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); | 393 | dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); |
399 | dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc); | 394 | dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); |
400 | mv_xor_clean_completed_slots(mv_chan); | 395 | mv_xor_clean_completed_slots(mv_chan); |
401 | 396 | ||
402 | /* free completed slots from the chain starting with | 397 | /* free completed slots from the chain starting with |
@@ -440,7 +435,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |||
440 | } | 435 | } |
441 | 436 | ||
442 | if (cookie > 0) | 437 | if (cookie > 0) |
443 | mv_chan->dmachan.completed_cookie = cookie; | 438 | mv_chan->completed_cookie = cookie; |
444 | } | 439 | } |
445 | 440 | ||
446 | static void | 441 | static void |
@@ -539,6 +534,18 @@ retry: | |||
539 | return NULL; | 534 | return NULL; |
540 | } | 535 | } |
541 | 536 | ||
537 | static dma_cookie_t | ||
538 | mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, | ||
539 | struct mv_xor_desc_slot *desc) | ||
540 | { | ||
541 | dma_cookie_t cookie = mv_chan->common.cookie; | ||
542 | |||
543 | if (++cookie < 0) | ||
544 | cookie = 1; | ||
545 | mv_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
546 | return cookie; | ||
547 | } | ||
548 | |||
542 | /************************ DMA engine API functions ****************************/ | 549 | /************************ DMA engine API functions ****************************/ |
543 | static dma_cookie_t | 550 | static dma_cookie_t |
544 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | 551 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -549,14 +556,14 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
549 | dma_cookie_t cookie; | 556 | dma_cookie_t cookie; |
550 | int new_hw_chain = 1; | 557 | int new_hw_chain = 1; |
551 | 558 | ||
552 | dev_dbg(mv_chan_to_devp(mv_chan), | 559 | dev_dbg(mv_chan->device->common.dev, |
553 | "%s sw_desc %p: async_tx %p\n", | 560 | "%s sw_desc %p: async_tx %p\n", |
554 | __func__, sw_desc, &sw_desc->async_tx); | 561 | __func__, sw_desc, &sw_desc->async_tx); |
555 | 562 | ||
556 | grp_start = sw_desc->group_head; | 563 | grp_start = sw_desc->group_head; |
557 | 564 | ||
558 | spin_lock_bh(&mv_chan->lock); | 565 | spin_lock_bh(&mv_chan->lock); |
559 | cookie = dma_cookie_assign(tx); | 566 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); |
560 | 567 | ||
561 | if (list_empty(&mv_chan->chain)) | 568 | if (list_empty(&mv_chan->chain)) |
562 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); | 569 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); |
@@ -572,7 +579,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
572 | if (!mv_can_chain(grp_start)) | 579 | if (!mv_can_chain(grp_start)) |
573 | goto submit_done; | 580 | goto submit_done; |
574 | 581 | ||
575 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", | 582 | dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", |
576 | old_chain_tail->async_tx.phys); | 583 | old_chain_tail->async_tx.phys); |
577 | 584 | ||
578 | /* fix up the hardware chain */ | 585 | /* fix up the hardware chain */ |
@@ -606,7 +613,9 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
606 | int idx; | 613 | int idx; |
607 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 614 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
608 | struct mv_xor_desc_slot *slot = NULL; | 615 | struct mv_xor_desc_slot *slot = NULL; |
609 | int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE; | 616 | struct mv_xor_platform_data *plat_data = |
617 | mv_chan->device->pdev->dev.platform_data; | ||
618 | int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; | ||
610 | 619 | ||
611 | /* Allocate descriptor slots */ | 620 | /* Allocate descriptor slots */ |
612 | idx = mv_chan->slots_allocated; | 621 | idx = mv_chan->slots_allocated; |
@@ -617,7 +626,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
617 | " %d descriptor slots", idx); | 626 | " %d descriptor slots", idx); |
618 | break; | 627 | break; |
619 | } | 628 | } |
620 | hw_desc = (char *) mv_chan->dma_desc_pool_virt; | 629 | hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; |
621 | slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | 630 | slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; |
622 | 631 | ||
623 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | 632 | dma_async_tx_descriptor_init(&slot->async_tx, chan); |
@@ -625,7 +634,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
625 | INIT_LIST_HEAD(&slot->chain_node); | 634 | INIT_LIST_HEAD(&slot->chain_node); |
626 | INIT_LIST_HEAD(&slot->slot_node); | 635 | INIT_LIST_HEAD(&slot->slot_node); |
627 | INIT_LIST_HEAD(&slot->tx_list); | 636 | INIT_LIST_HEAD(&slot->tx_list); |
628 | hw_desc = (char *) mv_chan->dma_desc_pool; | 637 | hw_desc = (char *) mv_chan->device->dma_desc_pool; |
629 | slot->async_tx.phys = | 638 | slot->async_tx.phys = |
630 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | 639 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; |
631 | slot->idx = idx++; | 640 | slot->idx = idx++; |
@@ -641,7 +650,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
641 | struct mv_xor_desc_slot, | 650 | struct mv_xor_desc_slot, |
642 | slot_node); | 651 | slot_node); |
643 | 652 | ||
644 | dev_dbg(mv_chan_to_devp(mv_chan), | 653 | dev_dbg(mv_chan->device->common.dev, |
645 | "allocated %d descriptor slots last_used: %p\n", | 654 | "allocated %d descriptor slots last_used: %p\n", |
646 | mv_chan->slots_allocated, mv_chan->last_used); | 655 | mv_chan->slots_allocated, mv_chan->last_used); |
647 | 656 | ||
@@ -656,7 +665,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
656 | struct mv_xor_desc_slot *sw_desc, *grp_start; | 665 | struct mv_xor_desc_slot *sw_desc, *grp_start; |
657 | int slot_cnt; | 666 | int slot_cnt; |
658 | 667 | ||
659 | dev_dbg(mv_chan_to_devp(mv_chan), | 668 | dev_dbg(mv_chan->device->common.dev, |
660 | "%s dest: %x src %x len: %u flags: %ld\n", | 669 | "%s dest: %x src %x len: %u flags: %ld\n", |
661 | __func__, dest, src, len, flags); | 670 | __func__, dest, src, len, flags); |
662 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 671 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
@@ -680,7 +689,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
680 | } | 689 | } |
681 | spin_unlock_bh(&mv_chan->lock); | 690 | spin_unlock_bh(&mv_chan->lock); |
682 | 691 | ||
683 | dev_dbg(mv_chan_to_devp(mv_chan), | 692 | dev_dbg(mv_chan->device->common.dev, |
684 | "%s sw_desc %p async_tx %p\n", | 693 | "%s sw_desc %p async_tx %p\n", |
685 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); | 694 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); |
686 | 695 | ||
@@ -695,7 +704,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
695 | struct mv_xor_desc_slot *sw_desc, *grp_start; | 704 | struct mv_xor_desc_slot *sw_desc, *grp_start; |
696 | int slot_cnt; | 705 | int slot_cnt; |
697 | 706 | ||
698 | dev_dbg(mv_chan_to_devp(mv_chan), | 707 | dev_dbg(mv_chan->device->common.dev, |
699 | "%s dest: %x len: %u flags: %ld\n", | 708 | "%s dest: %x len: %u flags: %ld\n", |
700 | __func__, dest, len, flags); | 709 | __func__, dest, len, flags); |
701 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 710 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
@@ -718,7 +727,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
718 | sw_desc->unmap_len = len; | 727 | sw_desc->unmap_len = len; |
719 | } | 728 | } |
720 | spin_unlock_bh(&mv_chan->lock); | 729 | spin_unlock_bh(&mv_chan->lock); |
721 | dev_dbg(mv_chan_to_devp(mv_chan), | 730 | dev_dbg(mv_chan->device->common.dev, |
722 | "%s sw_desc %p async_tx %p \n", | 731 | "%s sw_desc %p async_tx %p \n", |
723 | __func__, sw_desc, &sw_desc->async_tx); | 732 | __func__, sw_desc, &sw_desc->async_tx); |
724 | return sw_desc ? &sw_desc->async_tx : NULL; | 733 | return sw_desc ? &sw_desc->async_tx : NULL; |
@@ -737,7 +746,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
737 | 746 | ||
738 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | 747 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
739 | 748 | ||
740 | dev_dbg(mv_chan_to_devp(mv_chan), | 749 | dev_dbg(mv_chan->device->common.dev, |
741 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", | 750 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", |
742 | __func__, src_cnt, len, dest, flags); | 751 | __func__, src_cnt, len, dest, flags); |
743 | 752 | ||
@@ -758,7 +767,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
758 | mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); | 767 | mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); |
759 | } | 768 | } |
760 | spin_unlock_bh(&mv_chan->lock); | 769 | spin_unlock_bh(&mv_chan->lock); |
761 | dev_dbg(mv_chan_to_devp(mv_chan), | 770 | dev_dbg(mv_chan->device->common.dev, |
762 | "%s sw_desc %p async_tx %p \n", | 771 | "%s sw_desc %p async_tx %p \n", |
763 | __func__, sw_desc, &sw_desc->async_tx); | 772 | __func__, sw_desc, &sw_desc->async_tx); |
764 | return sw_desc ? &sw_desc->async_tx : NULL; | 773 | return sw_desc ? &sw_desc->async_tx : NULL; |
@@ -791,12 +800,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan) | |||
791 | } | 800 | } |
792 | mv_chan->last_used = NULL; | 801 | mv_chan->last_used = NULL; |
793 | 802 | ||
794 | dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n", | 803 | dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", |
795 | __func__, mv_chan->slots_allocated); | 804 | __func__, mv_chan->slots_allocated); |
796 | spin_unlock_bh(&mv_chan->lock); | 805 | spin_unlock_bh(&mv_chan->lock); |
797 | 806 | ||
798 | if (in_use_descs) | 807 | if (in_use_descs) |
799 | dev_err(mv_chan_to_devp(mv_chan), | 808 | dev_err(mv_chan->device->common.dev, |
800 | "freeing %d in use descriptors!\n", in_use_descs); | 809 | "freeing %d in use descriptors!\n", in_use_descs); |
801 | } | 810 | } |
802 | 811 | ||
@@ -811,16 +820,27 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, | |||
811 | struct dma_tx_state *txstate) | 820 | struct dma_tx_state *txstate) |
812 | { | 821 | { |
813 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 822 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
823 | dma_cookie_t last_used; | ||
824 | dma_cookie_t last_complete; | ||
814 | enum dma_status ret; | 825 | enum dma_status ret; |
815 | 826 | ||
816 | ret = dma_cookie_status(chan, cookie, txstate); | 827 | last_used = chan->cookie; |
828 | last_complete = mv_chan->completed_cookie; | ||
829 | mv_chan->is_complete_cookie = cookie; | ||
830 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
831 | |||
832 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
817 | if (ret == DMA_SUCCESS) { | 833 | if (ret == DMA_SUCCESS) { |
818 | mv_xor_clean_completed_slots(mv_chan); | 834 | mv_xor_clean_completed_slots(mv_chan); |
819 | return ret; | 835 | return ret; |
820 | } | 836 | } |
821 | mv_xor_slot_cleanup(mv_chan); | 837 | mv_xor_slot_cleanup(mv_chan); |
822 | 838 | ||
823 | return dma_cookie_status(chan, cookie, txstate); | 839 | last_used = chan->cookie; |
840 | last_complete = mv_chan->completed_cookie; | ||
841 | |||
842 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
843 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
824 | } | 844 | } |
825 | 845 | ||
826 | static void mv_dump_xor_regs(struct mv_xor_chan *chan) | 846 | static void mv_dump_xor_regs(struct mv_xor_chan *chan) |
@@ -828,42 +848,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan) | |||
828 | u32 val; | 848 | u32 val; |
829 | 849 | ||
830 | val = __raw_readl(XOR_CONFIG(chan)); | 850 | val = __raw_readl(XOR_CONFIG(chan)); |
831 | dev_err(mv_chan_to_devp(chan), | 851 | dev_printk(KERN_ERR, chan->device->common.dev, |
832 | "config 0x%08x.\n", val); | 852 | "config 0x%08x.\n", val); |
833 | 853 | ||
834 | val = __raw_readl(XOR_ACTIVATION(chan)); | 854 | val = __raw_readl(XOR_ACTIVATION(chan)); |
835 | dev_err(mv_chan_to_devp(chan), | 855 | dev_printk(KERN_ERR, chan->device->common.dev, |
836 | "activation 0x%08x.\n", val); | 856 | "activation 0x%08x.\n", val); |
837 | 857 | ||
838 | val = __raw_readl(XOR_INTR_CAUSE(chan)); | 858 | val = __raw_readl(XOR_INTR_CAUSE(chan)); |
839 | dev_err(mv_chan_to_devp(chan), | 859 | dev_printk(KERN_ERR, chan->device->common.dev, |
840 | "intr cause 0x%08x.\n", val); | 860 | "intr cause 0x%08x.\n", val); |
841 | 861 | ||
842 | val = __raw_readl(XOR_INTR_MASK(chan)); | 862 | val = __raw_readl(XOR_INTR_MASK(chan)); |
843 | dev_err(mv_chan_to_devp(chan), | 863 | dev_printk(KERN_ERR, chan->device->common.dev, |
844 | "intr mask 0x%08x.\n", val); | 864 | "intr mask 0x%08x.\n", val); |
845 | 865 | ||
846 | val = __raw_readl(XOR_ERROR_CAUSE(chan)); | 866 | val = __raw_readl(XOR_ERROR_CAUSE(chan)); |
847 | dev_err(mv_chan_to_devp(chan), | 867 | dev_printk(KERN_ERR, chan->device->common.dev, |
848 | "error cause 0x%08x.\n", val); | 868 | "error cause 0x%08x.\n", val); |
849 | 869 | ||
850 | val = __raw_readl(XOR_ERROR_ADDR(chan)); | 870 | val = __raw_readl(XOR_ERROR_ADDR(chan)); |
851 | dev_err(mv_chan_to_devp(chan), | 871 | dev_printk(KERN_ERR, chan->device->common.dev, |
852 | "error addr 0x%08x.\n", val); | 872 | "error addr 0x%08x.\n", val); |
853 | } | 873 | } |
854 | 874 | ||
855 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | 875 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, |
856 | u32 intr_cause) | 876 | u32 intr_cause) |
857 | { | 877 | { |
858 | if (intr_cause & (1 << 4)) { | 878 | if (intr_cause & (1 << 4)) { |
859 | dev_dbg(mv_chan_to_devp(chan), | 879 | dev_dbg(chan->device->common.dev, |
860 | "ignore this error\n"); | 880 | "ignore this error\n"); |
861 | return; | 881 | return; |
862 | } | 882 | } |
863 | 883 | ||
864 | dev_err(mv_chan_to_devp(chan), | 884 | dev_printk(KERN_ERR, chan->device->common.dev, |
865 | "error on chan %d. intr cause 0x%08x.\n", | 885 | "error on chan %d. intr cause 0x%08x.\n", |
866 | chan->idx, intr_cause); | 886 | chan->idx, intr_cause); |
867 | 887 | ||
868 | mv_dump_xor_regs(chan); | 888 | mv_dump_xor_regs(chan); |
869 | BUG(); | 889 | BUG(); |
@@ -874,7 +894,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | |||
874 | struct mv_xor_chan *chan = data; | 894 | struct mv_xor_chan *chan = data; |
875 | u32 intr_cause = mv_chan_get_intr_cause(chan); | 895 | u32 intr_cause = mv_chan_get_intr_cause(chan); |
876 | 896 | ||
877 | dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); | 897 | dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); |
878 | 898 | ||
879 | if (mv_is_err_intr(intr_cause)) | 899 | if (mv_is_err_intr(intr_cause)) |
880 | mv_xor_err_interrupt_handler(chan, intr_cause); | 900 | mv_xor_err_interrupt_handler(chan, intr_cause); |
@@ -901,7 +921,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan) | |||
901 | */ | 921 | */ |
902 | #define MV_XOR_TEST_SIZE 2000 | 922 | #define MV_XOR_TEST_SIZE 2000 |
903 | 923 | ||
904 | static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | 924 | static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) |
905 | { | 925 | { |
906 | int i; | 926 | int i; |
907 | void *src, *dest; | 927 | void *src, *dest; |
@@ -910,6 +930,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
910 | dma_cookie_t cookie; | 930 | dma_cookie_t cookie; |
911 | struct dma_async_tx_descriptor *tx; | 931 | struct dma_async_tx_descriptor *tx; |
912 | int err = 0; | 932 | int err = 0; |
933 | struct mv_xor_chan *mv_chan; | ||
913 | 934 | ||
914 | src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | 935 | src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); |
915 | if (!src) | 936 | if (!src) |
@@ -925,7 +946,10 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
925 | for (i = 0; i < MV_XOR_TEST_SIZE; i++) | 946 | for (i = 0; i < MV_XOR_TEST_SIZE; i++) |
926 | ((u8 *) src)[i] = (u8)i; | 947 | ((u8 *) src)[i] = (u8)i; |
927 | 948 | ||
928 | dma_chan = &mv_chan->dmachan; | 949 | /* Start copy, using first DMA channel */ |
950 | dma_chan = container_of(device->common.channels.next, | ||
951 | struct dma_chan, | ||
952 | device_node); | ||
929 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { | 953 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
930 | err = -ENODEV; | 954 | err = -ENODEV; |
931 | goto out; | 955 | goto out; |
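On one side of this hunk the memcpy self-test obtains its test channel by taking the first entry of the dma_device's channel list and recovering the containing struct dma_chan with container_of() on its embedded device_node. A minimal, self-contained sketch of that pattern in plain C; the list handling is done by hand and demo_chan is a hypothetical stand-in for struct dma_chan:

    #include <stdio.h>
    #include <stddef.h>

    /* same idea as the kernel macro: step back from a member to its container */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *next, *prev; };

    struct demo_chan {                    /* stand-in for struct dma_chan */
            int idx;
            struct list_head device_node; /* linked into the device's channel list */
    };

    int main(void)
    {
            struct list_head channels = { &channels, &channels };
            struct demo_chan ch = { .idx = 7 };

            /* hand-rolled list_add_tail(&ch.device_node, &channels) */
            ch.device_node.next = &channels;
            ch.device_node.prev = &channels;
            channels.next = &ch.device_node;
            channels.prev = &ch.device_node;

            /* "first DMA channel" lookup, as in the self-test above */
            struct demo_chan *first =
                    container_of(channels.next, struct demo_chan, device_node);
            printf("first channel idx = %d\n", first->idx); /* prints 7 */
            return 0;
    }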
@@ -946,17 +970,18 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
946 | 970 | ||
947 | if (mv_xor_status(dma_chan, cookie, NULL) != | 971 | if (mv_xor_status(dma_chan, cookie, NULL) != |
948 | DMA_SUCCESS) { | 972 | DMA_SUCCESS) { |
949 | dev_err(dma_chan->device->dev, | 973 | dev_printk(KERN_ERR, dma_chan->device->dev, |
950 | "Self-test copy timed out, disabling\n"); | 974 | "Self-test copy timed out, disabling\n"); |
951 | err = -ENODEV; | 975 | err = -ENODEV; |
952 | goto free_resources; | 976 | goto free_resources; |
953 | } | 977 | } |
954 | 978 | ||
955 | dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, | 979 | mv_chan = to_mv_xor_chan(dma_chan); |
980 | dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, | ||
956 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | 981 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); |
957 | if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { | 982 | if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { |
958 | dev_err(dma_chan->device->dev, | 983 | dev_printk(KERN_ERR, dma_chan->device->dev, |
959 | "Self-test copy failed compare, disabling\n"); | 984 | "Self-test copy failed compare, disabling\n"); |
960 | err = -ENODEV; | 985 | err = -ENODEV; |
961 | goto free_resources; | 986 | goto free_resources; |
962 | } | 987 | } |
@@ -970,8 +995,8 @@ out: | |||
970 | } | 995 | } |
971 | 996 | ||
972 | #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ | 997 | #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ |
973 | static int | 998 | static int __devinit |
974 | mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | 999 | mv_xor_xor_self_test(struct mv_xor_device *device) |
975 | { | 1000 | { |
976 | int i, src_idx; | 1001 | int i, src_idx; |
977 | struct page *dest; | 1002 | struct page *dest; |
@@ -984,6 +1009,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
984 | u8 cmp_byte = 0; | 1009 | u8 cmp_byte = 0; |
985 | u32 cmp_word; | 1010 | u32 cmp_word; |
986 | int err = 0; | 1011 | int err = 0; |
1012 | struct mv_xor_chan *mv_chan; | ||
987 | 1013 | ||
988 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | 1014 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { |
989 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | 1015 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); |
@@ -1016,7 +1042,9 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
1016 | 1042 | ||
1017 | memset(page_address(dest), 0, PAGE_SIZE); | 1043 | memset(page_address(dest), 0, PAGE_SIZE); |
1018 | 1044 | ||
1019 | dma_chan = &mv_chan->dmachan; | 1045 | dma_chan = container_of(device->common.channels.next, |
1046 | struct dma_chan, | ||
1047 | device_node); | ||
1020 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { | 1048 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
1021 | err = -ENODEV; | 1049 | err = -ENODEV; |
1022 | goto out; | 1050 | goto out; |
@@ -1040,21 +1068,22 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
1040 | 1068 | ||
1041 | if (mv_xor_status(dma_chan, cookie, NULL) != | 1069 | if (mv_xor_status(dma_chan, cookie, NULL) != |
1042 | DMA_SUCCESS) { | 1070 | DMA_SUCCESS) { |
1043 | dev_err(dma_chan->device->dev, | 1071 | dev_printk(KERN_ERR, dma_chan->device->dev, |
1044 | "Self-test xor timed out, disabling\n"); | 1072 | "Self-test xor timed out, disabling\n"); |
1045 | err = -ENODEV; | 1073 | err = -ENODEV; |
1046 | goto free_resources; | 1074 | goto free_resources; |
1047 | } | 1075 | } |
1048 | 1076 | ||
1049 | dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, | 1077 | mv_chan = to_mv_xor_chan(dma_chan); |
1078 | dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, | ||
1050 | PAGE_SIZE, DMA_FROM_DEVICE); | 1079 | PAGE_SIZE, DMA_FROM_DEVICE); |
1051 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | 1080 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { |
1052 | u32 *ptr = page_address(dest); | 1081 | u32 *ptr = page_address(dest); |
1053 | if (ptr[i] != cmp_word) { | 1082 | if (ptr[i] != cmp_word) { |
1054 | dev_err(dma_chan->device->dev, | 1083 | dev_printk(KERN_ERR, dma_chan->device->dev, |
1055 | "Self-test xor failed compare, disabling." | 1084 | "Self-test xor failed compare, disabling." |
1056 | " index %d, data %x, expected %x\n", i, | 1085 | " index %d, data %x, expected %x\n", i, |
1057 | ptr[i], cmp_word); | 1086 | ptr[i], cmp_word); |
1058 | err = -ENODEV; | 1087 | err = -ENODEV; |
1059 | goto free_resources; | 1088 | goto free_resources; |
1060 | } | 1089 | } |
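What the XOR self-test in the surrounding hunks verifies can be expressed entirely in software: XOR-ing the same source buffers on the CPU must reproduce the engine's output word for word. A stand-alone sketch of that reference check; the buffer size and fill pattern below are illustrative only and are not taken from the driver:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define NUM_SRC 4    /* mirrors MV_XOR_NUM_SRC_TEST */
    #define BUF_LEN 4096 /* stand-in for PAGE_SIZE */

    int main(void)
    {
            static uint8_t src[NUM_SRC][BUF_LEN];
            static uint8_t hw_dest[BUF_LEN]; /* pretend this came back from the engine */
            static uint8_t sw_dest[BUF_LEN]; /* CPU-computed reference */

            /* arbitrary, repeatable fill pattern for the sources */
            for (int s = 0; s < NUM_SRC; s++)
                    for (int i = 0; i < BUF_LEN; i++)
                            src[s][i] = (uint8_t)(i + (s << 5));

            /* software reference XOR of all sources */
            for (int i = 0; i < BUF_LEN; i++) {
                    uint8_t x = 0;

                    for (int s = 0; s < NUM_SRC; s++)
                            x ^= src[s][i];
                    sw_dest[i] = x;
            }

            /* stand-in for the DMA result; here we just copy the reference */
            memcpy(hw_dest, sw_dest, BUF_LEN);

            printf("self-test %s\n",
                   memcmp(hw_dest, sw_dest, BUF_LEN) ? "failed compare" : "passed");
            return 0;
    }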
@@ -1070,66 +1099,62 @@ out: | |||
1070 | return err; | 1099 | return err; |
1071 | } | 1100 | } |
1072 | 1101 | ||
1073 | /* This driver does not implement any of the optional DMA operations. */ | 1102 | static int __devexit mv_xor_remove(struct platform_device *dev) |
1074 | static int | ||
1075 | mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1076 | unsigned long arg) | ||
1077 | { | ||
1078 | return -ENOSYS; | ||
1079 | } | ||
1080 | |||
1081 | static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) | ||
1082 | { | 1103 | { |
1104 | struct mv_xor_device *device = platform_get_drvdata(dev); | ||
1083 | struct dma_chan *chan, *_chan; | 1105 | struct dma_chan *chan, *_chan; |
1084 | struct device *dev = mv_chan->dmadev.dev; | 1106 | struct mv_xor_chan *mv_chan; |
1107 | struct mv_xor_platform_data *plat_data = dev->dev.platform_data; | ||
1085 | 1108 | ||
1086 | dma_async_device_unregister(&mv_chan->dmadev); | 1109 | dma_async_device_unregister(&device->common); |
1087 | 1110 | ||
1088 | dma_free_coherent(dev, MV_XOR_POOL_SIZE, | 1111 | dma_free_coherent(&dev->dev, plat_data->pool_size, |
1089 | mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); | 1112 | device->dma_desc_pool_virt, device->dma_desc_pool); |
1090 | 1113 | ||
1091 | list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, | 1114 | list_for_each_entry_safe(chan, _chan, &device->common.channels, |
1092 | device_node) { | 1115 | device_node) { |
1116 | mv_chan = to_mv_xor_chan(chan); | ||
1093 | list_del(&chan->device_node); | 1117 | list_del(&chan->device_node); |
1094 | } | 1118 | } |
1095 | 1119 | ||
1096 | free_irq(mv_chan->irq, mv_chan); | ||
1097 | |||
1098 | return 0; | 1120 | return 0; |
1099 | } | 1121 | } |
1100 | 1122 | ||
1101 | static struct mv_xor_chan * | 1123 | static int __devinit mv_xor_probe(struct platform_device *pdev) |
1102 | mv_xor_channel_add(struct mv_xor_device *xordev, | ||
1103 | struct platform_device *pdev, | ||
1104 | int idx, dma_cap_mask_t cap_mask, int irq) | ||
1105 | { | 1124 | { |
1106 | int ret = 0; | 1125 | int ret = 0; |
1126 | int irq; | ||
1127 | struct mv_xor_device *adev; | ||
1107 | struct mv_xor_chan *mv_chan; | 1128 | struct mv_xor_chan *mv_chan; |
1108 | struct dma_device *dma_dev; | 1129 | struct dma_device *dma_dev; |
1130 | struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; | ||
1109 | 1131 | ||
1110 | mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); | ||
1111 | if (!mv_chan) { | ||
1112 | ret = -ENOMEM; | ||
1113 | goto err_free_dma; | ||
1114 | } | ||
1115 | 1132 | ||
1116 | mv_chan->idx = idx; | 1133 | adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); |
1117 | mv_chan->irq = irq; | 1134 | if (!adev) |
1135 | return -ENOMEM; | ||
1118 | 1136 | ||
1119 | dma_dev = &mv_chan->dmadev; | 1137 | dma_dev = &adev->common; |
1120 | 1138 | ||
1121 | /* allocate coherent memory for hardware descriptors | 1139 | /* allocate coherent memory for hardware descriptors |
1122 | * note: writecombine gives slightly better performance, but | 1140 | * note: writecombine gives slightly better performance, but |
1123 | * requires that we explicitly flush the writes | 1141 | * requires that we explicitly flush the writes |
1124 | */ | 1142 | */ |
1125 | mv_chan->dma_desc_pool_virt = | 1143 | adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, |
1126 | dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE, | 1144 | plat_data->pool_size, |
1127 | &mv_chan->dma_desc_pool, GFP_KERNEL); | 1145 | &adev->dma_desc_pool, |
1128 | if (!mv_chan->dma_desc_pool_virt) | 1146 | GFP_KERNEL); |
1129 | return ERR_PTR(-ENOMEM); | 1147 | if (!adev->dma_desc_pool_virt) |
1148 | return -ENOMEM; | ||
1149 | |||
1150 | adev->id = plat_data->hw_id; | ||
1130 | 1151 | ||
1131 | /* discover transaction capabilites from the platform data */ | 1152 | /* discover transaction capabilites from the platform data */ |
1132 | dma_dev->cap_mask = cap_mask; | 1153 | dma_dev->cap_mask = plat_data->cap_mask; |
1154 | adev->pdev = pdev; | ||
1155 | platform_set_drvdata(pdev, adev); | ||
1156 | |||
1157 | adev->shared = platform_get_drvdata(plat_data->shared); | ||
1133 | 1158 | ||
1134 | INIT_LIST_HEAD(&dma_dev->channels); | 1159 | INIT_LIST_HEAD(&dma_dev->channels); |
1135 | 1160 | ||
@@ -1138,7 +1163,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1138 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; | 1163 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; |
1139 | dma_dev->device_tx_status = mv_xor_status; | 1164 | dma_dev->device_tx_status = mv_xor_status; |
1140 | dma_dev->device_issue_pending = mv_xor_issue_pending; | 1165 | dma_dev->device_issue_pending = mv_xor_issue_pending; |
1141 | dma_dev->device_control = mv_xor_control; | ||
1142 | dma_dev->dev = &pdev->dev; | 1166 | dma_dev->dev = &pdev->dev; |
1143 | 1167 | ||
1144 | /* set prep routines based on capability */ | 1168 | /* set prep routines based on capability */ |
@@ -1151,7 +1175,15 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1151 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; | 1175 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; |
1152 | } | 1176 | } |
1153 | 1177 | ||
1154 | mv_chan->mmr_base = xordev->xor_base; | 1178 | mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); |
1179 | if (!mv_chan) { | ||
1180 | ret = -ENOMEM; | ||
1181 | goto err_free_dma; | ||
1182 | } | ||
1183 | mv_chan->device = adev; | ||
1184 | mv_chan->idx = plat_data->hw_id; | ||
1185 | mv_chan->mmr_base = adev->shared->xor_base; | ||
1186 | |||
1155 | if (!mv_chan->mmr_base) { | 1187 | if (!mv_chan->mmr_base) { |
1156 | ret = -ENOMEM; | 1188 | ret = -ENOMEM; |
1157 | goto err_free_dma; | 1189 | goto err_free_dma; |
@@ -1162,8 +1194,14 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1162 | /* clear errors before enabling interrupts */ | 1194 | /* clear errors before enabling interrupts */ |
1163 | mv_xor_device_clear_err_status(mv_chan); | 1195 | mv_xor_device_clear_err_status(mv_chan); |
1164 | 1196 | ||
1165 | ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler, | 1197 | irq = platform_get_irq(pdev, 0); |
1166 | 0, dev_name(&pdev->dev), mv_chan); | 1198 | if (irq < 0) { |
1199 | ret = irq; | ||
1200 | goto err_free_dma; | ||
1201 | } | ||
1202 | ret = devm_request_irq(&pdev->dev, irq, | ||
1203 | mv_xor_interrupt_handler, | ||
1204 | 0, dev_name(&pdev->dev), mv_chan); | ||
1167 | if (ret) | 1205 | if (ret) |
1168 | goto err_free_dma; | 1206 | goto err_free_dma; |
1169 | 1207 | ||
@@ -1175,26 +1213,25 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1175 | INIT_LIST_HEAD(&mv_chan->chain); | 1213 | INIT_LIST_HEAD(&mv_chan->chain); |
1176 | INIT_LIST_HEAD(&mv_chan->completed_slots); | 1214 | INIT_LIST_HEAD(&mv_chan->completed_slots); |
1177 | INIT_LIST_HEAD(&mv_chan->all_slots); | 1215 | INIT_LIST_HEAD(&mv_chan->all_slots); |
1178 | mv_chan->dmachan.device = dma_dev; | 1216 | mv_chan->common.device = dma_dev; |
1179 | dma_cookie_init(&mv_chan->dmachan); | ||
1180 | 1217 | ||
1181 | list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); | 1218 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); |
1182 | 1219 | ||
1183 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | 1220 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { |
1184 | ret = mv_xor_memcpy_self_test(mv_chan); | 1221 | ret = mv_xor_memcpy_self_test(adev); |
1185 | dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); | 1222 | dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); |
1186 | if (ret) | 1223 | if (ret) |
1187 | goto err_free_irq; | 1224 | goto err_free_dma; |
1188 | } | 1225 | } |
1189 | 1226 | ||
1190 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1227 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1191 | ret = mv_xor_xor_self_test(mv_chan); | 1228 | ret = mv_xor_xor_self_test(adev); |
1192 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); | 1229 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); |
1193 | if (ret) | 1230 | if (ret) |
1194 | goto err_free_irq; | 1231 | goto err_free_dma; |
1195 | } | 1232 | } |
1196 | 1233 | ||
1197 | dev_info(&pdev->dev, "Marvell XOR: " | 1234 | dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " |
1198 | "( %s%s%s%s)\n", | 1235 | "( %s%s%s%s)\n", |
1199 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1236 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1200 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | 1237 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", |
@@ -1202,21 +1239,20 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1202 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1239 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1203 | 1240 | ||
1204 | dma_async_device_register(dma_dev); | 1241 | dma_async_device_register(dma_dev); |
1205 | return mv_chan; | 1242 | goto out; |
1206 | 1243 | ||
1207 | err_free_irq: | ||
1208 | free_irq(mv_chan->irq, mv_chan); | ||
1209 | err_free_dma: | 1244 | err_free_dma: |
1210 | dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, | 1245 | dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, |
1211 | mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); | 1246 | adev->dma_desc_pool_virt, adev->dma_desc_pool); |
1212 | return ERR_PTR(ret); | 1247 | out: |
1248 | return ret; | ||
1213 | } | 1249 | } |
1214 | 1250 | ||
1215 | static void | 1251 | static void |
1216 | mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | 1252 | mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, |
1217 | const struct mbus_dram_target_info *dram) | 1253 | struct mbus_dram_target_info *dram) |
1218 | { | 1254 | { |
1219 | void __iomem *base = xordev->xor_base; | 1255 | void __iomem *base = msp->xor_base; |
1220 | u32 win_enable = 0; | 1256 | u32 win_enable = 0; |
1221 | int i; | 1257 | int i; |
1222 | 1258 | ||
@@ -1228,7 +1264,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | |||
1228 | } | 1264 | } |
1229 | 1265 | ||
1230 | for (i = 0; i < dram->num_cs; i++) { | 1266 | for (i = 0; i < dram->num_cs; i++) { |
1231 | const struct mbus_dram_window *cs = dram->cs + i; | 1267 | struct mbus_dram_window *cs = dram->cs + i; |
1232 | 1268 | ||
1233 | writel((cs->base & 0xffff0000) | | 1269 | writel((cs->base & 0xffff0000) | |
1234 | (cs->mbus_attr << 8) | | 1270 | (cs->mbus_attr << 8) | |
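The window-programming loop above packs fields of one DRAM chip select into a single 32-bit register: the upper 16 bits of the CS base address and the mbus attribute shifted into bits 15:8. The final OR term is cut off by the hunk boundary and presumably carries the mbus target id, so the helper below is a hedged reconstruction with made-up example values, not a quote of the driver:

    #include <stdio.h>
    #include <stdint.h>

    /* assumed field layout, based on the shifts and masks visible in the hunk */
    static uint32_t pack_window_base(uint32_t cs_base, uint8_t mbus_attr,
                                     uint8_t target_id)
    {
            return (cs_base & 0xffff0000) | ((uint32_t)mbus_attr << 8) | target_id;
    }

    int main(void)
    {
            /* example values only; not taken from any real board description */
            uint32_t reg = pack_window_base(0x20000000, 0x0e, 0x00);

            printf("WINDOW_BASE value = 0x%08x\n", (unsigned int)reg); /* 0x20000e00 */
            return 0;
    }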
@@ -1241,179 +1277,84 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | |||
1241 | 1277 | ||
1242 | writel(win_enable, base + WINDOW_BAR_ENABLE(0)); | 1278 | writel(win_enable, base + WINDOW_BAR_ENABLE(0)); |
1243 | writel(win_enable, base + WINDOW_BAR_ENABLE(1)); | 1279 | writel(win_enable, base + WINDOW_BAR_ENABLE(1)); |
1244 | writel(0, base + WINDOW_OVERRIDE_CTRL(0)); | ||
1245 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); | ||
1246 | } | 1280 | } |
1247 | 1281 | ||
1248 | static int mv_xor_probe(struct platform_device *pdev) | 1282 | static struct platform_driver mv_xor_driver = { |
1283 | .probe = mv_xor_probe, | ||
1284 | .remove = __devexit_p(mv_xor_remove), | ||
1285 | .driver = { | ||
1286 | .owner = THIS_MODULE, | ||
1287 | .name = MV_XOR_NAME, | ||
1288 | }, | ||
1289 | }; | ||
1290 | |||
1291 | static int mv_xor_shared_probe(struct platform_device *pdev) | ||
1249 | { | 1292 | { |
1250 | const struct mbus_dram_target_info *dram; | 1293 | struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data; |
1251 | struct mv_xor_device *xordev; | 1294 | struct mv_xor_shared_private *msp; |
1252 | struct mv_xor_platform_data *pdata = pdev->dev.platform_data; | ||
1253 | struct resource *res; | 1295 | struct resource *res; |
1254 | int i, ret; | ||
1255 | 1296 | ||
1256 | dev_notice(&pdev->dev, "Marvell XOR driver\n"); | 1297 | dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); |
1257 | 1298 | ||
1258 | xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); | 1299 | msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); |
1259 | if (!xordev) | 1300 | if (!msp) |
1260 | return -ENOMEM; | 1301 | return -ENOMEM; |
1261 | 1302 | ||
1262 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1303 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1263 | if (!res) | 1304 | if (!res) |
1264 | return -ENODEV; | 1305 | return -ENODEV; |
1265 | 1306 | ||
1266 | xordev->xor_base = devm_ioremap(&pdev->dev, res->start, | 1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, |
1267 | resource_size(res)); | 1308 | resource_size(res)); |
1268 | if (!xordev->xor_base) | 1309 | if (!msp->xor_base) |
1269 | return -EBUSY; | 1310 | return -EBUSY; |
1270 | 1311 | ||
1271 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1312 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1272 | if (!res) | 1313 | if (!res) |
1273 | return -ENODEV; | 1314 | return -ENODEV; |
1274 | 1315 | ||
1275 | xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start, | 1316 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, |
1276 | resource_size(res)); | 1317 | resource_size(res)); |
1277 | if (!xordev->xor_high_base) | 1318 | if (!msp->xor_high_base) |
1278 | return -EBUSY; | 1319 | return -EBUSY; |
1279 | 1320 | ||
1280 | platform_set_drvdata(pdev, xordev); | 1321 | platform_set_drvdata(pdev, msp); |
1281 | 1322 | ||
1282 | /* | 1323 | /* |
1283 | * (Re-)program MBUS remapping windows if we are asked to. | 1324 | * (Re-)program MBUS remapping windows if we are asked to. |
1284 | */ | 1325 | */ |
1285 | dram = mv_mbus_dram_info(); | 1326 | if (msd != NULL && msd->dram != NULL) |
1286 | if (dram) | 1327 | mv_xor_conf_mbus_windows(msp, msd->dram); |
1287 | mv_xor_conf_mbus_windows(xordev, dram); | ||
1288 | |||
1289 | /* Not all platforms can gate the clock, so it is not | ||
1290 | * an error if the clock does not exists. | ||
1291 | */ | ||
1292 | xordev->clk = clk_get(&pdev->dev, NULL); | ||
1293 | if (!IS_ERR(xordev->clk)) | ||
1294 | clk_prepare_enable(xordev->clk); | ||
1295 | |||
1296 | if (pdev->dev.of_node) { | ||
1297 | struct device_node *np; | ||
1298 | int i = 0; | ||
1299 | |||
1300 | for_each_child_of_node(pdev->dev.of_node, np) { | ||
1301 | dma_cap_mask_t cap_mask; | ||
1302 | int irq; | ||
1303 | |||
1304 | dma_cap_zero(cap_mask); | ||
1305 | if (of_property_read_bool(np, "dmacap,memcpy")) | ||
1306 | dma_cap_set(DMA_MEMCPY, cap_mask); | ||
1307 | if (of_property_read_bool(np, "dmacap,xor")) | ||
1308 | dma_cap_set(DMA_XOR, cap_mask); | ||
1309 | if (of_property_read_bool(np, "dmacap,memset")) | ||
1310 | dma_cap_set(DMA_MEMSET, cap_mask); | ||
1311 | if (of_property_read_bool(np, "dmacap,interrupt")) | ||
1312 | dma_cap_set(DMA_INTERRUPT, cap_mask); | ||
1313 | |||
1314 | irq = irq_of_parse_and_map(np, 0); | ||
1315 | if (!irq) { | ||
1316 | ret = -ENODEV; | ||
1317 | goto err_channel_add; | ||
1318 | } | ||
1319 | |||
1320 | xordev->channels[i] = | ||
1321 | mv_xor_channel_add(xordev, pdev, i, | ||
1322 | cap_mask, irq); | ||
1323 | if (IS_ERR(xordev->channels[i])) { | ||
1324 | ret = PTR_ERR(xordev->channels[i]); | ||
1325 | xordev->channels[i] = NULL; | ||
1326 | irq_dispose_mapping(irq); | ||
1327 | goto err_channel_add; | ||
1328 | } | ||
1329 | |||
1330 | i++; | ||
1331 | } | ||
1332 | } else if (pdata && pdata->channels) { | ||
1333 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { | ||
1334 | struct mv_xor_channel_data *cd; | ||
1335 | int irq; | ||
1336 | |||
1337 | cd = &pdata->channels[i]; | ||
1338 | if (!cd) { | ||
1339 | ret = -ENODEV; | ||
1340 | goto err_channel_add; | ||
1341 | } | ||
1342 | |||
1343 | irq = platform_get_irq(pdev, i); | ||
1344 | if (irq < 0) { | ||
1345 | ret = irq; | ||
1346 | goto err_channel_add; | ||
1347 | } | ||
1348 | |||
1349 | xordev->channels[i] = | ||
1350 | mv_xor_channel_add(xordev, pdev, i, | ||
1351 | cd->cap_mask, irq); | ||
1352 | if (IS_ERR(xordev->channels[i])) { | ||
1353 | ret = PTR_ERR(xordev->channels[i]); | ||
1354 | goto err_channel_add; | ||
1355 | } | ||
1356 | } | ||
1357 | } | ||
1358 | 1328 | ||
1359 | return 0; | 1329 | return 0; |
1360 | |||
1361 | err_channel_add: | ||
1362 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) | ||
1363 | if (xordev->channels[i]) { | ||
1364 | mv_xor_channel_remove(xordev->channels[i]); | ||
1365 | if (pdev->dev.of_node) | ||
1366 | irq_dispose_mapping(xordev->channels[i]->irq); | ||
1367 | } | ||
1368 | |||
1369 | if (!IS_ERR(xordev->clk)) { | ||
1370 | clk_disable_unprepare(xordev->clk); | ||
1371 | clk_put(xordev->clk); | ||
1372 | } | ||
1373 | |||
1374 | return ret; | ||
1375 | } | 1330 | } |
1376 | 1331 | ||
1377 | static int mv_xor_remove(struct platform_device *pdev) | 1332 | static int mv_xor_shared_remove(struct platform_device *pdev) |
1378 | { | 1333 | { |
1379 | struct mv_xor_device *xordev = platform_get_drvdata(pdev); | ||
1380 | int i; | ||
1381 | |||
1382 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { | ||
1383 | if (xordev->channels[i]) | ||
1384 | mv_xor_channel_remove(xordev->channels[i]); | ||
1385 | } | ||
1386 | |||
1387 | if (!IS_ERR(xordev->clk)) { | ||
1388 | clk_disable_unprepare(xordev->clk); | ||
1389 | clk_put(xordev->clk); | ||
1390 | } | ||
1391 | |||
1392 | return 0; | 1334 | return 0; |
1393 | } | 1335 | } |
1394 | 1336 | ||
1395 | #ifdef CONFIG_OF | 1337 | static struct platform_driver mv_xor_shared_driver = { |
1396 | static struct of_device_id mv_xor_dt_ids[] = { | 1338 | .probe = mv_xor_shared_probe, |
1397 | { .compatible = "marvell,orion-xor", }, | 1339 | .remove = mv_xor_shared_remove, |
1398 | {}, | ||
1399 | }; | ||
1400 | MODULE_DEVICE_TABLE(of, mv_xor_dt_ids); | ||
1401 | #endif | ||
1402 | |||
1403 | static struct platform_driver mv_xor_driver = { | ||
1404 | .probe = mv_xor_probe, | ||
1405 | .remove = mv_xor_remove, | ||
1406 | .driver = { | 1340 | .driver = { |
1407 | .owner = THIS_MODULE, | 1341 | .owner = THIS_MODULE, |
1408 | .name = MV_XOR_NAME, | 1342 | .name = MV_XOR_SHARED_NAME, |
1409 | .of_match_table = of_match_ptr(mv_xor_dt_ids), | ||
1410 | }, | 1343 | }, |
1411 | }; | 1344 | }; |
1412 | 1345 | ||
1413 | 1346 | ||
1414 | static int __init mv_xor_init(void) | 1347 | static int __init mv_xor_init(void) |
1415 | { | 1348 | { |
1416 | return platform_driver_register(&mv_xor_driver); | 1349 | int rc; |
1350 | |||
1351 | rc = platform_driver_register(&mv_xor_shared_driver); | ||
1352 | if (!rc) { | ||
1353 | rc = platform_driver_register(&mv_xor_driver); | ||
1354 | if (rc) | ||
1355 | platform_driver_unregister(&mv_xor_shared_driver); | ||
1356 | } | ||
1357 | return rc; | ||
1417 | } | 1358 | } |
1418 | module_init(mv_xor_init); | 1359 | module_init(mv_xor_init); |
1419 | 1360 | ||
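The module init on the result side of this hunk registers the shared platform driver first and the per-channel driver second, unregistering the shared one again if the second registration fails; the exit path later unregisters them in reverse order. The same ordered-registration-with-rollback shape, reduced to stand-alone stubs (the register_*/unregister_* functions below are fakes, not the platform bus API):

    #include <stdio.h>

    /* fake registration hooks; each returns 0 on success, negative on failure */
    static int register_shared(void)
    {
            printf("shared driver registered\n");
            return 0;
    }

    static int register_channels(void)
    {
            printf("channel driver registration: pretend failure\n");
            return -1;
    }

    static void unregister_shared(void)
    {
            printf("shared driver unregistered\n");
    }

    static int demo_init(void)
    {
            int rc = register_shared();

            if (rc)
                    return rc;

            rc = register_channels();
            if (rc) /* roll back the first registration */
                    unregister_shared();
            return rc;
    }

    int main(void)
    {
            printf("init returned %d\n", demo_init());
            return 0;
    }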
@@ -1422,6 +1363,7 @@ module_init(mv_xor_init); | |||
1422 | static void __exit mv_xor_exit(void) | 1363 | static void __exit mv_xor_exit(void) |
1423 | { | 1364 | { |
1424 | platform_driver_unregister(&mv_xor_driver); | 1365 | platform_driver_unregister(&mv_xor_driver); |
1366 | platform_driver_unregister(&mv_xor_shared_driver); | ||
1425 | return; | 1367 | return; |
1426 | } | 1368 | } |
1427 | 1369 | ||
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index c632a4761fc..977b592e976 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -24,10 +24,8 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | 25 | ||
26 | #define USE_TIMER | 26 | #define USE_TIMER |
27 | #define MV_XOR_POOL_SIZE PAGE_SIZE | ||
28 | #define MV_XOR_SLOT_SIZE 64 | 27 | #define MV_XOR_SLOT_SIZE 64 |
29 | #define MV_XOR_THRESHOLD 1 | 28 | #define MV_XOR_THRESHOLD 1 |
30 | #define MV_XOR_MAX_CHANNELS 2 | ||
31 | 29 | ||
32 | #define XOR_OPERATION_MODE_XOR 0 | 30 | #define XOR_OPERATION_MODE_XOR 0 |
33 | #define XOR_OPERATION_MODE_MEMCPY 2 | 31 | #define XOR_OPERATION_MODE_MEMCPY 2 |
@@ -53,18 +51,34 @@ | |||
53 | #define WINDOW_SIZE(w) (0x270 + ((w) << 2)) | 51 | #define WINDOW_SIZE(w) (0x270 + ((w) << 2)) |
54 | #define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) | 52 | #define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) |
55 | #define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) | 53 | #define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) |
56 | #define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) | ||
57 | 54 | ||
55 | struct mv_xor_shared_private { | ||
56 | void __iomem *xor_base; | ||
57 | void __iomem *xor_high_base; | ||
58 | }; | ||
59 | |||
60 | |||
61 | /** | ||
62 | * struct mv_xor_device - internal representation of a XOR device | ||
63 | * @pdev: Platform device | ||
64 | * @id: HW XOR Device selector | ||
65 | * @dma_desc_pool: base of DMA descriptor region (DMA address) | ||
66 | * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) | ||
67 | * @common: embedded struct dma_device | ||
68 | */ | ||
58 | struct mv_xor_device { | 69 | struct mv_xor_device { |
59 | void __iomem *xor_base; | 70 | struct platform_device *pdev; |
60 | void __iomem *xor_high_base; | 71 | int id; |
61 | struct clk *clk; | 72 | dma_addr_t dma_desc_pool; |
62 | struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; | 73 | void *dma_desc_pool_virt; |
74 | struct dma_device common; | ||
75 | struct mv_xor_shared_private *shared; | ||
63 | }; | 76 | }; |
64 | 77 | ||
65 | /** | 78 | /** |
66 | * struct mv_xor_chan - internal representation of a XOR channel | 79 | * struct mv_xor_chan - internal representation of a XOR channel |
67 | * @pending: allows batching of hardware operations | 80 | * @pending: allows batching of hardware operations |
81 | * @completed_cookie: identifier for the most recently completed operation | ||
68 | * @lock: serializes enqueue/dequeue operations to the descriptors pool | 82 | * @lock: serializes enqueue/dequeue operations to the descriptors pool |
69 | * @mmr_base: memory mapped register base | 83 | * @mmr_base: memory mapped register base |
70 | * @idx: the index of the xor channel | 84 | * @idx: the index of the xor channel |
@@ -79,18 +93,15 @@ struct mv_xor_device { | |||
79 | */ | 93 | */ |
80 | struct mv_xor_chan { | 94 | struct mv_xor_chan { |
81 | int pending; | 95 | int pending; |
96 | dma_cookie_t completed_cookie; | ||
82 | spinlock_t lock; /* protects the descriptor slot pool */ | 97 | spinlock_t lock; /* protects the descriptor slot pool */ |
83 | void __iomem *mmr_base; | 98 | void __iomem *mmr_base; |
84 | unsigned int idx; | 99 | unsigned int idx; |
85 | int irq; | ||
86 | enum dma_transaction_type current_type; | 100 | enum dma_transaction_type current_type; |
87 | struct list_head chain; | 101 | struct list_head chain; |
88 | struct list_head completed_slots; | 102 | struct list_head completed_slots; |
89 | dma_addr_t dma_desc_pool; | 103 | struct mv_xor_device *device; |
90 | void *dma_desc_pool_virt; | 104 | struct dma_chan common; |
91 | size_t pool_size; | ||
92 | struct dma_device dmadev; | ||
93 | struct dma_chan dmachan; | ||
94 | struct mv_xor_desc_slot *last_used; | 105 | struct mv_xor_desc_slot *last_used; |
95 | struct list_head all_slots; | 106 | struct list_head all_slots; |
96 | int slots_allocated; | 107 | int slots_allocated; |
@@ -98,6 +109,7 @@ struct mv_xor_chan { | |||
98 | #ifdef USE_TIMER | 109 | #ifdef USE_TIMER |
99 | unsigned long cleanup_time; | 110 | unsigned long cleanup_time; |
100 | u32 current_on_last_cleanup; | 111 | u32 current_on_last_cleanup; |
112 | dma_cookie_t is_complete_cookie; | ||
101 | #endif | 113 | #endif |
102 | }; | 114 | }; |
103 | 115 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 9f02e794b12..be641cbd36f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -22,15 +22,11 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/module.h> | ||
26 | #include <linux/fsl/mxs-dma.h> | ||
27 | #include <linux/stmp_device.h> | ||
28 | #include <linux/of.h> | ||
29 | #include <linux/of_device.h> | ||
30 | 25 | ||
31 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
32 | 27 | #include <mach/mxs.h> | |
33 | #include "dmaengine.h" | 28 | #include <mach/dma.h> |
29 | #include <mach/common.h> | ||
34 | 30 | ||
35 | /* | 31 | /* |
36 | * NOTE: The term "PIO" throughout the mxs-dma implementation means | 32 | * NOTE: The term "PIO" throughout the mxs-dma implementation means |
@@ -38,25 +34,29 @@ | |||
38 | * dma can program the controller registers of peripheral devices. | 34 | * dma can program the controller registers of peripheral devices. |
39 | */ | 35 | */ |
40 | 36 | ||
41 | #define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH) | 37 | #define MXS_DMA_APBH 0 |
42 | #define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA) | 38 | #define MXS_DMA_APBX 1 |
39 | #define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) | ||
40 | |||
41 | #define APBH_VERSION_LATEST 3 | ||
42 | #define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) | ||
43 | 43 | ||
44 | #define HW_APBHX_CTRL0 0x000 | 44 | #define HW_APBHX_CTRL0 0x000 |
45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) | 45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) |
46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) | 46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) |
47 | #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 | ||
47 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 | 48 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 |
48 | #define HW_APBHX_CTRL1 0x010 | 49 | #define HW_APBHX_CTRL1 0x010 |
49 | #define HW_APBHX_CTRL2 0x020 | 50 | #define HW_APBHX_CTRL2 0x020 |
50 | #define HW_APBHX_CHANNEL_CTRL 0x030 | 51 | #define HW_APBHX_CHANNEL_CTRL 0x030 |
51 | #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 | 52 | #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 |
52 | /* | 53 | #define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) |
53 | * The offset of NXTCMDAR register is different per both dma type and version, | 54 | #define HW_APBX_VERSION 0x800 |
54 | * while stride for each channel is all the same 0x70. | 55 | #define BP_APBHX_VERSION_MAJOR 24 |
55 | */ | 56 | #define HW_APBHX_CHn_NXTCMDAR(n) \ |
56 | #define HW_APBHX_CHn_NXTCMDAR(d, n) \ | 57 | (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) |
57 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) | 58 | #define HW_APBHX_CHn_SEMA(n) \ |
58 | #define HW_APBHX_CHn_SEMA(d, n) \ | 59 | (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70) |
59 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) | ||
60 | 60 | ||
61 | /* | 61 | /* |
62 | * ccw bits definitions | 62 | * ccw bits definitions |
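Both versions of the HW_APBHX_CHn_NXTCMDAR/HW_APBHX_CHn_SEMA macros in the hunk above encode the same rule: every channel owns a 0x70-byte register block, and only the block's base differs between an old APBH controller and the newer APBH/APBX ones. A small stand-alone sketch of that offset computation (chn_nxtcmdar/chn_sema are hypothetical helper names):

    #include <stdio.h>
    #include <stdbool.h>

    #define CHAN_STRIDE 0x70 /* every channel's register block is 0x70 bytes */

    static unsigned int chn_nxtcmdar(bool is_apbh, bool apbh_old, int chan)
    {
            unsigned int base = (is_apbh && apbh_old) ? 0x050 : 0x110;
            return base + chan * CHAN_STRIDE;
    }

    static unsigned int chn_sema(bool is_apbh, bool apbh_old, int chan)
    {
            unsigned int base = (is_apbh && apbh_old) ? 0x080 : 0x140;
            return base + chan * CHAN_STRIDE;
    }

    int main(void)
    {
            /* channel 3 on an old APBH versus a newer APBX controller */
            printf("old apbh ch3: NXTCMDAR 0x%03x, SEMA 0x%03x\n",
                   chn_nxtcmdar(true, true, 3), chn_sema(true, true, 3));
            printf("apbx     ch3: NXTCMDAR 0x%03x, SEMA 0x%03x\n",
                   chn_nxtcmdar(false, false, 3), chn_sema(false, false, 3));
            return 0;
    }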
@@ -101,8 +101,7 @@ struct mxs_dma_ccw { | |||
101 | u32 pio_words[MXS_PIO_WORDS]; | 101 | u32 pio_words[MXS_PIO_WORDS]; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | #define CCW_BLOCK_SIZE (4 * PAGE_SIZE) | 104 | #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) |
105 | #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)) | ||
106 | 105 | ||
107 | struct mxs_dma_chan { | 106 | struct mxs_dma_chan { |
108 | struct mxs_dma_engine *mxs_dma; | 107 | struct mxs_dma_engine *mxs_dma; |
@@ -112,7 +111,7 @@ struct mxs_dma_chan { | |||
112 | int chan_irq; | 111 | int chan_irq; |
113 | struct mxs_dma_ccw *ccw; | 112 | struct mxs_dma_ccw *ccw; |
114 | dma_addr_t ccw_phys; | 113 | dma_addr_t ccw_phys; |
115 | int desc_count; | 114 | dma_cookie_t last_completed; |
116 | enum dma_status status; | 115 | enum dma_status status; |
117 | unsigned int flags; | 116 | unsigned int flags; |
118 | #define MXS_DMA_SG_LOOP (1 << 0) | 117 | #define MXS_DMA_SG_LOOP (1 << 0) |
@@ -121,19 +120,9 @@ struct mxs_dma_chan { | |||
121 | #define MXS_DMA_CHANNELS 16 | 120 | #define MXS_DMA_CHANNELS 16 |
122 | #define MXS_DMA_CHANNELS_MASK 0xffff | 121 | #define MXS_DMA_CHANNELS_MASK 0xffff |
123 | 122 | ||
124 | enum mxs_dma_devtype { | ||
125 | MXS_DMA_APBH, | ||
126 | MXS_DMA_APBX, | ||
127 | }; | ||
128 | |||
129 | enum mxs_dma_id { | ||
130 | IMX23_DMA, | ||
131 | IMX28_DMA, | ||
132 | }; | ||
133 | |||
134 | struct mxs_dma_engine { | 123 | struct mxs_dma_engine { |
135 | enum mxs_dma_id dev_id; | 124 | int dev_id; |
136 | enum mxs_dma_devtype type; | 125 | unsigned int version; |
137 | void __iomem *base; | 126 | void __iomem *base; |
138 | struct clk *clk; | 127 | struct clk *clk; |
139 | struct dma_device dma_device; | 128 | struct dma_device dma_device; |
@@ -141,88 +130,17 @@ struct mxs_dma_engine { | |||
141 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; | 130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; |
142 | }; | 131 | }; |
143 | 132 | ||
144 | struct mxs_dma_type { | ||
145 | enum mxs_dma_id id; | ||
146 | enum mxs_dma_devtype type; | ||
147 | }; | ||
148 | |||
149 | static struct mxs_dma_type mxs_dma_types[] = { | ||
150 | { | ||
151 | .id = IMX23_DMA, | ||
152 | .type = MXS_DMA_APBH, | ||
153 | }, { | ||
154 | .id = IMX23_DMA, | ||
155 | .type = MXS_DMA_APBX, | ||
156 | }, { | ||
157 | .id = IMX28_DMA, | ||
158 | .type = MXS_DMA_APBH, | ||
159 | }, { | ||
160 | .id = IMX28_DMA, | ||
161 | .type = MXS_DMA_APBX, | ||
162 | } | ||
163 | }; | ||
164 | |||
165 | static struct platform_device_id mxs_dma_ids[] = { | ||
166 | { | ||
167 | .name = "imx23-dma-apbh", | ||
168 | .driver_data = (kernel_ulong_t) &mxs_dma_types[0], | ||
169 | }, { | ||
170 | .name = "imx23-dma-apbx", | ||
171 | .driver_data = (kernel_ulong_t) &mxs_dma_types[1], | ||
172 | }, { | ||
173 | .name = "imx28-dma-apbh", | ||
174 | .driver_data = (kernel_ulong_t) &mxs_dma_types[2], | ||
175 | }, { | ||
176 | .name = "imx28-dma-apbx", | ||
177 | .driver_data = (kernel_ulong_t) &mxs_dma_types[3], | ||
178 | }, { | ||
179 | /* end of list */ | ||
180 | } | ||
181 | }; | ||
182 | |||
183 | static const struct of_device_id mxs_dma_dt_ids[] = { | ||
184 | { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], }, | ||
185 | { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], }, | ||
186 | { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], }, | ||
187 | { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], }, | ||
188 | { /* sentinel */ } | ||
189 | }; | ||
190 | MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids); | ||
191 | |||
192 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | ||
193 | { | ||
194 | return container_of(chan, struct mxs_dma_chan, chan); | ||
195 | } | ||
196 | |||
197 | int mxs_dma_is_apbh(struct dma_chan *chan) | ||
198 | { | ||
199 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
200 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
201 | |||
202 | return dma_is_apbh(mxs_dma); | ||
203 | } | ||
204 | EXPORT_SYMBOL_GPL(mxs_dma_is_apbh); | ||
205 | |||
206 | int mxs_dma_is_apbx(struct dma_chan *chan) | ||
207 | { | ||
208 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
209 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
210 | |||
211 | return !dma_is_apbh(mxs_dma); | ||
212 | } | ||
213 | EXPORT_SYMBOL_GPL(mxs_dma_is_apbx); | ||
214 | |||
215 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | 133 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) |
216 | { | 134 | { |
217 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
218 | int chan_id = mxs_chan->chan.chan_id; | 136 | int chan_id = mxs_chan->chan.chan_id; |
219 | 137 | ||
220 | if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) | 138 | if (dma_is_apbh() && apbh_is_old()) |
221 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), | 139 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), |
222 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); | 140 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); |
223 | else | 141 | else |
224 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), | 142 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), |
225 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); | 143 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); |
226 | } | 144 | } |
227 | 145 | ||
228 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | 146 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) |
@@ -232,14 +150,37 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
232 | 150 | ||
233 | /* set cmd_addr up */ | 151 | /* set cmd_addr up */ |
234 | writel(mxs_chan->ccw_phys, | 152 | writel(mxs_chan->ccw_phys, |
235 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); | 153 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); |
154 | |||
155 | /* enable apbh channel clock */ | ||
156 | if (dma_is_apbh()) { | ||
157 | if (apbh_is_old()) | ||
158 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
159 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
160 | else | ||
161 | writel(1 << chan_id, | ||
162 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
163 | } | ||
236 | 164 | ||
237 | /* write 1 to SEMA to kick off the channel */ | 165 | /* write 1 to SEMA to kick off the channel */ |
238 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | 166 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); |
239 | } | 167 | } |
240 | 168 | ||
241 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | 169 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) |
242 | { | 170 | { |
171 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
172 | int chan_id = mxs_chan->chan.chan_id; | ||
173 | |||
174 | /* disable apbh channel clock */ | ||
175 | if (dma_is_apbh()) { | ||
176 | if (apbh_is_old()) | ||
177 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
178 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
179 | else | ||
180 | writel(1 << chan_id, | ||
181 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
182 | } | ||
183 | |||
243 | mxs_chan->status = DMA_SUCCESS; | 184 | mxs_chan->status = DMA_SUCCESS; |
244 | } | 185 | } |
245 | 186 | ||
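The channel enable/disable paths on the result side of this hunk gate the per-channel clock by writing a single bit to set/clear aliases of HW_APBHX_CTRL0 instead of doing a read-modify-write. A user-space sketch of that set/clear idiom against a simulated register; the 0x4/0x8 alias offsets are an assumption, since the diff only shows them symbolically as MXS_SET_ADDR/MXS_CLR_ADDR (STMP_OFFSET_REG_SET/CLR on the other side), and fake_writel is a made-up stand-in for writel():

    #include <stdio.h>
    #include <stdint.h>

    #define REG_SET 0x4 /* assumed set-alias offset */
    #define REG_CLR 0x8 /* assumed clear-alias offset */

    #define BP_CLKGATE_CHANNEL 8 /* per-channel clock-gate bits start here */

    static uint32_t ctrl0; /* simulated HW_APBHX_CTRL0 */

    /* writel() stand-in that models the set/clear register aliases */
    static void fake_writel(uint32_t val, unsigned int offset)
    {
            if (offset == REG_SET)
                    ctrl0 |= val;  /* set only the written bits */
            else if (offset == REG_CLR)
                    ctrl0 &= ~val; /* clear only the written bits */
            else
                    ctrl0 = val;   /* plain write */
    }

    int main(void)
    {
            int chan_id = 2;

            /* disable the channel clock: set its gate bit */
            fake_writel(1u << (chan_id + BP_CLKGATE_CHANNEL), REG_SET);
            printf("after gate:   ctrl0 = 0x%08x\n", (unsigned int)ctrl0);

            /* enable it again: clear the gate bit, leaving all other bits alone */
            fake_writel(1u << (chan_id + BP_CLKGATE_CHANNEL), REG_CLR);
            printf("after ungate: ctrl0 = 0x%08x\n", (unsigned int)ctrl0);
            return 0;
    }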
@@ -249,12 +190,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | |||
249 | int chan_id = mxs_chan->chan.chan_id; | 190 | int chan_id = mxs_chan->chan.chan_id; |
250 | 191 | ||
251 | /* freeze the channel */ | 192 | /* freeze the channel */ |
252 | if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) | 193 | if (dma_is_apbh() && apbh_is_old()) |
253 | writel(1 << chan_id, | 194 | writel(1 << chan_id, |
254 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); | 195 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); |
255 | else | 196 | else |
256 | writel(1 << chan_id, | 197 | writel(1 << chan_id, |
257 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); | 198 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); |
258 | 199 | ||
259 | mxs_chan->status = DMA_PAUSED; | 200 | mxs_chan->status = DMA_PAUSED; |
260 | } | 201 | } |
@@ -265,19 +206,41 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | |||
265 | int chan_id = mxs_chan->chan.chan_id; | 206 | int chan_id = mxs_chan->chan.chan_id; |
266 | 207 | ||
267 | /* unfreeze the channel */ | 208 | /* unfreeze the channel */ |
268 | if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) | 209 | if (dma_is_apbh() && apbh_is_old()) |
269 | writel(1 << chan_id, | 210 | writel(1 << chan_id, |
270 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR); | 211 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); |
271 | else | 212 | else |
272 | writel(1 << chan_id, | 213 | writel(1 << chan_id, |
273 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); | 214 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); |
274 | 215 | ||
275 | mxs_chan->status = DMA_IN_PROGRESS; | 216 | mxs_chan->status = DMA_IN_PROGRESS; |
276 | } | 217 | } |
277 | 218 | ||
219 | static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) | ||
220 | { | ||
221 | dma_cookie_t cookie = mxs_chan->chan.cookie; | ||
222 | |||
223 | if (++cookie < 0) | ||
224 | cookie = 1; | ||
225 | |||
226 | mxs_chan->chan.cookie = cookie; | ||
227 | mxs_chan->desc.cookie = cookie; | ||
228 | |||
229 | return cookie; | ||
230 | } | ||
231 | |||
232 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | ||
233 | { | ||
234 | return container_of(chan, struct mxs_dma_chan, chan); | ||
235 | } | ||
236 | |||
278 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 237 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
279 | { | 238 | { |
280 | return dma_cookie_assign(tx); | 239 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); |
240 | |||
241 | mxs_dma_enable_chan(mxs_chan); | ||
242 | |||
243 | return mxs_dma_assign_cookie(mxs_chan); | ||
281 | } | 244 | } |
282 | 245 | ||
283 | static void mxs_dma_tasklet(unsigned long data) | 246 | static void mxs_dma_tasklet(unsigned long data) |
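The cookie helper on the result side of the hunk above follows the usual dmaengine convention: cookies are small positive integers that only grow, and 0 and negative values are reserved, so the counter restarts at 1 when it would leave that range. The driver's version increments and checks for a negative result; the sketch below expresses the same rule without relying on signed overflow:

    #include <stdio.h>
    #include <limits.h>

    typedef int dma_cookie_t;

    /* next valid cookie after 'last'; skips 0 and all negative values */
    static dma_cookie_t next_cookie(dma_cookie_t last)
    {
            if (last >= INT_MAX || last < 0)
                    return 1; /* restart the sequence */
            return last + 1;
    }

    int main(void)
    {
            printf("%d -> %d\n", 41, next_cookie(41));           /* 42 */
            printf("%d -> %d\n", INT_MAX, next_cookie(INT_MAX)); /* 1  */
            return 0;
    }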
@@ -296,16 +259,16 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
296 | /* completion status */ | 259 | /* completion status */ |
297 | stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); | 260 | stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); |
298 | stat1 &= MXS_DMA_CHANNELS_MASK; | 261 | stat1 &= MXS_DMA_CHANNELS_MASK; |
299 | writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); | 262 | writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); |
300 | 263 | ||
301 | /* error status */ | 264 | /* error status */ |
302 | stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); | 265 | stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); |
303 | writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); | 266 | writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); |
304 | 267 | ||
305 | /* | 268 | /* |
306 | * When both completion and error of termination bits set at the | 269 | * When both completion and error of termination bits set at the |
307 | * same time, we do not take it as an error. IOW, it only becomes | 270 | * same time, we do not take it as an error. IOW, it only becomes |
308 | * an error we need to handle here in case of either it's (1) a bus | 271 | * an error we need to handler here in case of ether it's (1) an bus |
309 | * error or (2) a termination error with no completion. | 272 | * error or (2) a termination error with no completion. |
310 | */ | 273 | */ |
311 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | 274 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ |
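The comment in this hunk states the per-channel error rule: a channel has really failed only if its bus-error bit is set, or if its termination bit is set without a matching completion bit. Taking the layout suggested by the visible shift (termination flags in the low 16 bits of CTRL2, bus-error flags in the high 16 bits, completion flags in CTRL1), and noting that the second OR term of the real expression is cut off by the hunk, the rule can be reconstructed as in the sketch below; real_errors is a hypothetical helper, not the driver's code:

    #include <stdio.h>
    #include <stdint.h>

    #define CH_MASK 0xffffu

    /* term:   per-channel termination bits (CTRL2 low half)
     * buserr: per-channel bus-error bits   (CTRL2 high half, already shifted down)
     * done:   per-channel completion bits  (CTRL1)
     * returns the channels that actually failed
     */
    static uint32_t real_errors(uint32_t term, uint32_t buserr, uint32_t done)
    {
            return (buserr & CH_MASK) |      /* (1) any bus error          */
                   (term & ~done & CH_MASK); /* (2) terminated, never done */
    }

    int main(void)
    {
            /* channel 0: terminated and completed  -> not an error
             * channel 1: terminated, no completion -> error
             * channel 2: bus error                 -> error
             */
            uint32_t term   = (1u << 0) | (1u << 1);
            uint32_t buserr = (1u << 2);
            uint32_t done   = (1u << 0);

            printf("error channels: 0x%04x\n",
                   (unsigned int)real_errors(term, buserr, done)); /* 0x0006 */
            return 0;
    }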
@@ -334,7 +297,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
334 | stat1 &= ~(1 << channel); | 297 | stat1 &= ~(1 << channel); |
335 | 298 | ||
336 | if (mxs_chan->status == DMA_SUCCESS) | 299 | if (mxs_chan->status == DMA_SUCCESS) |
337 | dma_cookie_complete(&mxs_chan->desc); | 300 | mxs_chan->last_completed = mxs_chan->desc.cookie; |
338 | 301 | ||
339 | /* schedule tasklet on this channel */ | 302 | /* schedule tasklet on this channel */ |
340 | tasklet_schedule(&mxs_chan->tasklet); | 303 | tasklet_schedule(&mxs_chan->tasklet); |
@@ -355,15 +318,14 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
355 | 318 | ||
356 | mxs_chan->chan_irq = data->chan_irq; | 319 | mxs_chan->chan_irq = data->chan_irq; |
357 | 320 | ||
358 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, | 321 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, |
359 | CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, | 322 | &mxs_chan->ccw_phys, GFP_KERNEL); |
360 | GFP_KERNEL); | ||
361 | if (!mxs_chan->ccw) { | 323 | if (!mxs_chan->ccw) { |
362 | ret = -ENOMEM; | 324 | ret = -ENOMEM; |
363 | goto err_alloc; | 325 | goto err_alloc; |
364 | } | 326 | } |
365 | 327 | ||
366 | memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); | 328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); |
367 | 329 | ||
368 | if (mxs_chan->chan_irq != NO_IRQ) { | 330 | if (mxs_chan->chan_irq != NO_IRQ) { |
369 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 331 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
@@ -372,7 +334,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
372 | goto err_irq; | 334 | goto err_irq; |
373 | } | 335 | } |
374 | 336 | ||
375 | ret = clk_prepare_enable(mxs_dma->clk); | 337 | ret = clk_enable(mxs_dma->clk); |
376 | if (ret) | 338 | if (ret) |
377 | goto err_clk; | 339 | goto err_clk; |
378 | 340 | ||
@@ -389,7 +351,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
389 | err_clk: | 351 | err_clk: |
390 | free_irq(mxs_chan->chan_irq, mxs_dma); | 352 | free_irq(mxs_chan->chan_irq, mxs_dma); |
391 | err_irq: | 353 | err_irq: |
392 | dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, | 354 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, |
393 | mxs_chan->ccw, mxs_chan->ccw_phys); | 355 | mxs_chan->ccw, mxs_chan->ccw_phys); |
394 | err_alloc: | 356 | err_alloc: |
395 | return ret; | 357 | return ret; |
@@ -404,38 +366,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
404 | 366 | ||
405 | free_irq(mxs_chan->chan_irq, mxs_dma); | 367 | free_irq(mxs_chan->chan_irq, mxs_dma); |
406 | 368 | ||
407 | dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, | 369 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, |
408 | mxs_chan->ccw, mxs_chan->ccw_phys); | 370 | mxs_chan->ccw, mxs_chan->ccw_phys); |
409 | 371 | ||
410 | clk_disable_unprepare(mxs_dma->clk); | 372 | clk_disable(mxs_dma->clk); |
411 | } | 373 | } |
412 | 374 | ||
413 | /* | ||
414 | * How to use the flags for ->device_prep_slave_sg() : | ||
415 | * [1] If there is only one DMA command in the DMA chain, the code should be: | ||
416 | * ...... | ||
417 | * ->device_prep_slave_sg(DMA_CTRL_ACK); | ||
418 | * ...... | ||
419 | * [2] If there are two DMA commands in the DMA chain, the code should be | ||
420 | * ...... | ||
421 | * ->device_prep_slave_sg(0); | ||
422 | * ...... | ||
423 | * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
424 | * ...... | ||
425 | * [3] If there are more than two DMA commands in the DMA chain, the code | ||
426 | * should be: | ||
427 | * ...... | ||
428 | * ->device_prep_slave_sg(0); // First | ||
429 | * ...... | ||
430 | * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]); | ||
431 | * ...... | ||
432 | * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last | ||
433 | * ...... | ||
434 | */ | ||
435 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | 375 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( |
436 | struct dma_chan *chan, struct scatterlist *sgl, | 376 | struct dma_chan *chan, struct scatterlist *sgl, |
437 | unsigned int sg_len, enum dma_transfer_direction direction, | 377 | unsigned int sg_len, enum dma_data_direction direction, |
438 | unsigned long flags, void *context) | 378 | unsigned long append) |
439 | { | 379 | { |
440 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 380 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
441 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 381 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
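The usage comment removed by this hunk spells out which flags a client should pass to ->device_prep_slave_sg() for each position in a chained transfer: a lone command gets DMA_CTRL_ACK, the first command of a chain gets 0, the last gets DMA_PREP_INTERRUPT | DMA_CTRL_ACK, and middle commands get DMA_PREP_INTERRUPT with DMA_CTRL_ACK optional. The helper below restates that table; the flag values are local stand-ins, not the kernel's enum dma_ctrl_flags constants:

    #include <stdio.h>

    /* local stand-ins for the kernel's DMA_PREP_INTERRUPT / DMA_CTRL_ACK bits */
    #define PREP_INTERRUPT (1u << 0)
    #define CTRL_ACK       (1u << 1)

    /* flags for the i-th of n chained device_prep_slave_sg() calls,
     * per the usage comment removed in this hunk
     */
    static unsigned int slave_sg_flags(int i, int n)
    {
            if (n == 1)
                    return CTRL_ACK;                  /* single command        */
            if (i == 0)
                    return 0;                         /* first of a chain      */
            if (i == n - 1)
                    return PREP_INTERRUPT | CTRL_ACK; /* last of a chain       */
            return PREP_INTERRUPT;                    /* middle (ACK optional) */
    }

    int main(void)
    {
            int n = 4;

            for (int i = 0; i < n; i++)
                    printf("cmd %d/%d -> flags 0x%x\n", i + 1, n, slave_sg_flags(i, n));
            return 0;
    }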
@@ -443,8 +383,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
443 | struct scatterlist *sg; | 383 | struct scatterlist *sg; |
444 | int i, j; | 384 | int i, j; |
445 | u32 *pio; | 385 | u32 *pio; |
446 | bool append = flags & DMA_PREP_INTERRUPT; | 386 | static int idx; |
447 | int idx = append ? mxs_chan->desc_count : 0; | ||
448 | 387 | ||
449 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) | 388 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) |
450 | return NULL; | 389 | return NULL; |
@@ -470,11 +409,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
470 | ccw->bits |= CCW_CHAIN; | 409 | ccw->bits |= CCW_CHAIN; |
471 | ccw->bits &= ~CCW_IRQ; | 410 | ccw->bits &= ~CCW_IRQ; |
472 | ccw->bits &= ~CCW_DEC_SEM; | 411 | ccw->bits &= ~CCW_DEC_SEM; |
412 | ccw->bits &= ~CCW_WAIT4END; | ||
473 | } else { | 413 | } else { |
474 | idx = 0; | 414 | idx = 0; |
475 | } | 415 | } |
476 | 416 | ||
477 | if (direction == DMA_TRANS_NONE) { | 417 | if (direction == DMA_NONE) { |
478 | ccw = &mxs_chan->ccw[idx++]; | 418 | ccw = &mxs_chan->ccw[idx++]; |
479 | pio = (u32 *) sgl; | 419 | pio = (u32 *) sgl; |
480 | 420 | ||
@@ -484,17 +424,16 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
484 | ccw->bits = 0; | 424 | ccw->bits = 0; |
485 | ccw->bits |= CCW_IRQ; | 425 | ccw->bits |= CCW_IRQ; |
486 | ccw->bits |= CCW_DEC_SEM; | 426 | ccw->bits |= CCW_DEC_SEM; |
487 | if (flags & DMA_CTRL_ACK) | 427 | ccw->bits |= CCW_WAIT4END; |
488 | ccw->bits |= CCW_WAIT4END; | ||
489 | ccw->bits |= CCW_HALT_ON_TERM; | 428 | ccw->bits |= CCW_HALT_ON_TERM; |
490 | ccw->bits |= CCW_TERM_FLUSH; | 429 | ccw->bits |= CCW_TERM_FLUSH; |
491 | ccw->bits |= BF_CCW(sg_len, PIO_NUM); | 430 | ccw->bits |= BF_CCW(sg_len, PIO_NUM); |
492 | ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); | 431 | ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); |
493 | } else { | 432 | } else { |
494 | for_each_sg(sgl, sg, sg_len, i) { | 433 | for_each_sg(sgl, sg, sg_len, i) { |
495 | if (sg_dma_len(sg) > MAX_XFER_BYTES) { | 434 | if (sg->length > MAX_XFER_BYTES) { |
496 | dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", | 435 | dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", |
497 | sg_dma_len(sg), MAX_XFER_BYTES); | 436 | sg->length, MAX_XFER_BYTES); |
498 | goto err_out; | 437 | goto err_out; |
499 | } | 438 | } |
500 | 439 | ||
@@ -502,13 +441,13 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
502 | 441 | ||
503 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; | 442 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; |
504 | ccw->bufaddr = sg->dma_address; | 443 | ccw->bufaddr = sg->dma_address; |
505 | ccw->xfer_bytes = sg_dma_len(sg); | 444 | ccw->xfer_bytes = sg->length; |
506 | 445 | ||
507 | ccw->bits = 0; | 446 | ccw->bits = 0; |
508 | ccw->bits |= CCW_CHAIN; | 447 | ccw->bits |= CCW_CHAIN; |
509 | ccw->bits |= CCW_HALT_ON_TERM; | 448 | ccw->bits |= CCW_HALT_ON_TERM; |
510 | ccw->bits |= CCW_TERM_FLUSH; | 449 | ccw->bits |= CCW_TERM_FLUSH; |
511 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? | 450 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? |
512 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, | 451 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, |
513 | COMMAND); | 452 | COMMAND); |
514 | 453 | ||
@@ -516,12 +455,10 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
516 | ccw->bits &= ~CCW_CHAIN; | 455 | ccw->bits &= ~CCW_CHAIN; |
517 | ccw->bits |= CCW_IRQ; | 456 | ccw->bits |= CCW_IRQ; |
518 | ccw->bits |= CCW_DEC_SEM; | 457 | ccw->bits |= CCW_DEC_SEM; |
519 | if (flags & DMA_CTRL_ACK) | 458 | ccw->bits |= CCW_WAIT4END; |
520 | ccw->bits |= CCW_WAIT4END; | ||
521 | } | 459 | } |
522 | } | 460 | } |
523 | } | 461 | } |
524 | mxs_chan->desc_count = idx; | ||
525 | 462 | ||
526 | return &mxs_chan->desc; | 463 | return &mxs_chan->desc; |
527 | 464 | ||
@@ -532,8 +469,7 @@ err_out: | |||
532 | 469 | ||
533 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | 470 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( |
534 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 471 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
535 | size_t period_len, enum dma_transfer_direction direction, | 472 | size_t period_len, enum dma_data_direction direction) |
536 | unsigned long flags, void *context) | ||
537 | { | 473 | { |
538 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 474 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
539 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 475 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -576,7 +512,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
576 | ccw->bits |= CCW_IRQ; | 512 | ccw->bits |= CCW_IRQ; |
577 | ccw->bits |= CCW_HALT_ON_TERM; | 513 | ccw->bits |= CCW_HALT_ON_TERM; |
578 | ccw->bits |= CCW_TERM_FLUSH; | 514 | ccw->bits |= CCW_TERM_FLUSH; |
579 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? | 515 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? |
580 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | 516 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); |
581 | 517 | ||
582 | dma_addr += period_len; | 518 | dma_addr += period_len; |
@@ -584,7 +520,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
584 | 520 | ||
585 | i++; | 521 | i++; |
586 | } | 522 | } |
587 | mxs_chan->desc_count = i; | ||
588 | 523 | ||
589 | return &mxs_chan->desc; | 524 | return &mxs_chan->desc; |
590 | 525 | ||
@@ -601,8 +536,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
601 | 536 | ||
602 | switch (cmd) { | 537 | switch (cmd) { |
603 | case DMA_TERMINATE_ALL: | 538 | case DMA_TERMINATE_ALL: |
604 | mxs_dma_reset_chan(mxs_chan); | ||
605 | mxs_dma_disable_chan(mxs_chan); | 539 | mxs_dma_disable_chan(mxs_chan); |
540 | mxs_dma_reset_chan(mxs_chan); | ||
606 | break; | 541 | break; |
607 | case DMA_PAUSE: | 542 | case DMA_PAUSE: |
608 | mxs_dma_pause_chan(mxs_chan); | 543 | mxs_dma_pause_chan(mxs_chan); |
@@ -624,52 +559,60 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
624 | dma_cookie_t last_used; | 559 | dma_cookie_t last_used; |
625 | 560 | ||
626 | last_used = chan->cookie; | 561 | last_used = chan->cookie; |
627 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); | 562 | dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); |
628 | 563 | ||
629 | return mxs_chan->status; | 564 | return mxs_chan->status; |
630 | } | 565 | } |
631 | 566 | ||
632 | static void mxs_dma_issue_pending(struct dma_chan *chan) | 567 | static void mxs_dma_issue_pending(struct dma_chan *chan) |
633 | { | 568 | { |
634 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 569 | /* |
635 | 570 | * Nothing to do. We only have a single descriptor. | |
636 | mxs_dma_enable_chan(mxs_chan); | 571 | */ |
637 | } | 572 | } |
638 | 573 | ||
639 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | 574 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) |
640 | { | 575 | { |
641 | int ret; | 576 | int ret; |
642 | 577 | ||
643 | ret = clk_prepare_enable(mxs_dma->clk); | 578 | ret = clk_enable(mxs_dma->clk); |
644 | if (ret) | 579 | if (ret) |
645 | return ret; | 580 | goto err_out; |
646 | 581 | ||
647 | ret = stmp_reset_block(mxs_dma->base); | 582 | ret = mxs_reset_block(mxs_dma->base); |
648 | if (ret) | 583 | if (ret) |
649 | goto err_out; | 584 | goto err_out; |
650 | 585 | ||
586 | /* only major version matters */ | ||
587 | mxs_dma->version = readl(mxs_dma->base + | ||
588 | ((mxs_dma->dev_id == MXS_DMA_APBX) ? | ||
589 | HW_APBX_VERSION : HW_APBH_VERSION)) >> | ||
590 | BP_APBHX_VERSION_MAJOR; | ||
591 | |||
651 | /* enable apbh burst */ | 592 | /* enable apbh burst */ |
652 | if (dma_is_apbh(mxs_dma)) { | 593 | if (dma_is_apbh()) { |
653 | writel(BM_APBH_CTRL0_APB_BURST_EN, | 594 | writel(BM_APBH_CTRL0_APB_BURST_EN, |
654 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); | 595 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); |
655 | writel(BM_APBH_CTRL0_APB_BURST8_EN, | 596 | writel(BM_APBH_CTRL0_APB_BURST8_EN, |
656 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); | 597 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); |
657 | } | 598 | } |
658 | 599 | ||
659 | /* enable irq for all the channels */ | 600 | /* enable irq for all the channels */ |
660 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, | 601 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, |
661 | mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET); | 602 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); |
603 | |||
604 | clk_disable(mxs_dma->clk); | ||
605 | |||
606 | return 0; | ||
662 | 607 | ||
663 | err_out: | 608 | err_out: |
664 | clk_disable_unprepare(mxs_dma->clk); | ||
665 | return ret; | 609 | return ret; |
666 | } | 610 | } |
667 | 611 | ||
668 | static int __init mxs_dma_probe(struct platform_device *pdev) | 612 | static int __init mxs_dma_probe(struct platform_device *pdev) |
669 | { | 613 | { |
670 | const struct platform_device_id *id_entry; | 614 | const struct platform_device_id *id_entry = |
671 | const struct of_device_id *of_id; | 615 | platform_get_device_id(pdev); |
672 | const struct mxs_dma_type *dma_type; | ||
673 | struct mxs_dma_engine *mxs_dma; | 616 | struct mxs_dma_engine *mxs_dma; |
674 | struct resource *iores; | 617 | struct resource *iores; |
675 | int ret, i; | 618 | int ret, i; |
@@ -678,15 +621,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev) | |||
678 | if (!mxs_dma) | 621 | if (!mxs_dma) |
679 | return -ENOMEM; | 622 | return -ENOMEM; |
680 | 623 | ||
681 | of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); | 624 | mxs_dma->dev_id = id_entry->driver_data; |
682 | if (of_id) | ||
683 | id_entry = of_id->data; | ||
684 | else | ||
685 | id_entry = platform_get_device_id(pdev); | ||
686 | |||
687 | dma_type = (struct mxs_dma_type *)id_entry->driver_data; | ||
688 | mxs_dma->type = dma_type->type; | ||
689 | mxs_dma->dev_id = dma_type->id; | ||
690 | 625 | ||
691 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 626 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
692 | 627 | ||
@@ -719,7 +654,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev) | |||
719 | 654 | ||
720 | mxs_chan->mxs_dma = mxs_dma; | 655 | mxs_chan->mxs_dma = mxs_dma; |
721 | mxs_chan->chan.device = &mxs_dma->dma_device; | 656 | mxs_chan->chan.device = &mxs_dma->dma_device; |
722 | dma_cookie_init(&mxs_chan->chan); | ||
723 | 657 | ||
724 | tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, | 658 | tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, |
725 | (unsigned long) mxs_chan); | 659 | (unsigned long) mxs_chan); |
@@ -769,12 +703,23 @@ err_request_region: | |||
769 | return ret; | 703 | return ret; |
770 | } | 704 | } |
771 | 705 | ||
706 | static struct platform_device_id mxs_dma_type[] = { | ||
707 | { | ||
708 | .name = "mxs-dma-apbh", | ||
709 | .driver_data = MXS_DMA_APBH, | ||
710 | }, { | ||
711 | .name = "mxs-dma-apbx", | ||
712 | .driver_data = MXS_DMA_APBX, | ||
713 | }, { | ||
714 | /* end of list */ | ||
715 | } | ||
716 | }; | ||
717 | |||
772 | static struct platform_driver mxs_dma_driver = { | 718 | static struct platform_driver mxs_dma_driver = { |
773 | .driver = { | 719 | .driver = { |
774 | .name = "mxs-dma", | 720 | .name = "mxs-dma", |
775 | .of_match_table = mxs_dma_dt_ids, | ||
776 | }, | 721 | }, |
777 | .id_table = mxs_dma_ids, | 722 | .id_table = mxs_dma_type, |
778 | }; | 723 | }; |
779 | 724 | ||
780 | static int __init mxs_dma_module_init(void) | 725 | static int __init mxs_dma_module_init(void) |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c deleted file mode 100644 index 5a31264f2bd..00000000000 --- a/drivers/dma/omap-dma.c +++ /dev/null | |||
@@ -1,695 +0,0 @@ | |||
1 | /* | ||
2 | * OMAP DMAengine support | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/dmaengine.h> | ||
9 | #include <linux/dma-mapping.h> | ||
10 | #include <linux/err.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/list.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/omap-dma.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | |||
20 | #include "virt-dma.h" | ||
21 | |||
22 | struct omap_dmadev { | ||
23 | struct dma_device ddev; | ||
24 | spinlock_t lock; | ||
25 | struct tasklet_struct task; | ||
26 | struct list_head pending; | ||
27 | }; | ||
28 | |||
29 | struct omap_chan { | ||
30 | struct virt_dma_chan vc; | ||
31 | struct list_head node; | ||
32 | |||
33 | struct dma_slave_config cfg; | ||
34 | unsigned dma_sig; | ||
35 | bool cyclic; | ||
36 | bool paused; | ||
37 | |||
38 | int dma_ch; | ||
39 | struct omap_desc *desc; | ||
40 | unsigned sgidx; | ||
41 | }; | ||
42 | |||
43 | struct omap_sg { | ||
44 | dma_addr_t addr; | ||
45 | uint32_t en; /* number of elements (24-bit) */ | ||
46 | uint32_t fn; /* number of frames (16-bit) */ | ||
47 | }; | ||
48 | |||
49 | struct omap_desc { | ||
50 | struct virt_dma_desc vd; | ||
51 | enum dma_transfer_direction dir; | ||
52 | dma_addr_t dev_addr; | ||
53 | |||
54 | int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ | ||
55 | uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ | ||
56 | uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ | ||
57 | uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ | ||
58 | uint8_t periph_port; /* Peripheral port */ | ||
59 | |||
60 | unsigned sglen; | ||
61 | struct omap_sg sg[0]; | ||
62 | }; | ||
63 | |||
64 | static const unsigned es_bytes[] = { | ||
65 | [OMAP_DMA_DATA_TYPE_S8] = 1, | ||
66 | [OMAP_DMA_DATA_TYPE_S16] = 2, | ||
67 | [OMAP_DMA_DATA_TYPE_S32] = 4, | ||
68 | }; | ||
69 | |||
70 | static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) | ||
71 | { | ||
72 | return container_of(d, struct omap_dmadev, ddev); | ||
73 | } | ||
74 | |||
75 | static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) | ||
76 | { | ||
77 | return container_of(c, struct omap_chan, vc.chan); | ||
78 | } | ||
79 | |||
80 | static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) | ||
81 | { | ||
82 | return container_of(t, struct omap_desc, vd.tx); | ||
83 | } | ||
84 | |||
85 | static void omap_dma_desc_free(struct virt_dma_desc *vd) | ||
86 | { | ||
87 | kfree(container_of(vd, struct omap_desc, vd)); | ||
88 | } | ||
89 | |||
90 | static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, | ||
91 | unsigned idx) | ||
92 | { | ||
93 | struct omap_sg *sg = d->sg + idx; | ||
94 | |||
95 | if (d->dir == DMA_DEV_TO_MEM) | ||
96 | omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, | ||
97 | OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); | ||
98 | else | ||
99 | omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, | ||
100 | OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); | ||
101 | |||
102 | omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, | ||
103 | d->sync_mode, c->dma_sig, d->sync_type); | ||
104 | |||
105 | omap_start_dma(c->dma_ch); | ||
106 | } | ||
107 | |||
108 | static void omap_dma_start_desc(struct omap_chan *c) | ||
109 | { | ||
110 | struct virt_dma_desc *vd = vchan_next_desc(&c->vc); | ||
111 | struct omap_desc *d; | ||
112 | |||
113 | if (!vd) { | ||
114 | c->desc = NULL; | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | list_del(&vd->node); | ||
119 | |||
120 | c->desc = d = to_omap_dma_desc(&vd->tx); | ||
121 | c->sgidx = 0; | ||
122 | |||
123 | if (d->dir == DMA_DEV_TO_MEM) | ||
124 | omap_set_dma_src_params(c->dma_ch, d->periph_port, | ||
125 | OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); | ||
126 | else | ||
127 | omap_set_dma_dest_params(c->dma_ch, d->periph_port, | ||
128 | OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); | ||
129 | |||
130 | omap_dma_start_sg(c, d, 0); | ||
131 | } | ||
132 | |||
133 | static void omap_dma_callback(int ch, u16 status, void *data) | ||
134 | { | ||
135 | struct omap_chan *c = data; | ||
136 | struct omap_desc *d; | ||
137 | unsigned long flags; | ||
138 | |||
139 | spin_lock_irqsave(&c->vc.lock, flags); | ||
140 | d = c->desc; | ||
141 | if (d) { | ||
142 | if (!c->cyclic) { | ||
143 | if (++c->sgidx < d->sglen) { | ||
144 | omap_dma_start_sg(c, d, c->sgidx); | ||
145 | } else { | ||
146 | omap_dma_start_desc(c); | ||
147 | vchan_cookie_complete(&d->vd); | ||
148 | } | ||
149 | } else { | ||
150 | vchan_cyclic_callback(&d->vd); | ||
151 | } | ||
152 | } | ||
153 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * This callback schedules all pending channels. We could be more | ||
158 | * clever here by postponing allocation of the real DMA channels to | ||
159 | * this point, and freeing them when our virtual channel becomes idle. | ||
160 | * | ||
161 | * We would then need to deal with 'all channels in-use' | ||
162 | */ | ||
163 | static void omap_dma_sched(unsigned long data) | ||
164 | { | ||
165 | struct omap_dmadev *d = (struct omap_dmadev *)data; | ||
166 | LIST_HEAD(head); | ||
167 | |||
168 | spin_lock_irq(&d->lock); | ||
169 | list_splice_tail_init(&d->pending, &head); | ||
170 | spin_unlock_irq(&d->lock); | ||
171 | |||
172 | while (!list_empty(&head)) { | ||
173 | struct omap_chan *c = list_first_entry(&head, | ||
174 | struct omap_chan, node); | ||
175 | |||
176 | spin_lock_irq(&c->vc.lock); | ||
177 | list_del_init(&c->node); | ||
178 | omap_dma_start_desc(c); | ||
179 | spin_unlock_irq(&c->vc.lock); | ||
180 | } | ||
181 | } | ||
182 | |||
183 | static int omap_dma_alloc_chan_resources(struct dma_chan *chan) | ||
184 | { | ||
185 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
186 | |||
187 | dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); | ||
188 | |||
189 | return omap_request_dma(c->dma_sig, "DMA engine", | ||
190 | omap_dma_callback, c, &c->dma_ch); | ||
191 | } | ||
192 | |||
193 | static void omap_dma_free_chan_resources(struct dma_chan *chan) | ||
194 | { | ||
195 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
196 | |||
197 | vchan_free_chan_resources(&c->vc); | ||
198 | omap_free_dma(c->dma_ch); | ||
199 | |||
200 | dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); | ||
201 | } | ||
202 | |||
203 | static size_t omap_dma_sg_size(struct omap_sg *sg) | ||
204 | { | ||
205 | return sg->en * sg->fn; | ||
206 | } | ||
207 | |||
208 | static size_t omap_dma_desc_size(struct omap_desc *d) | ||
209 | { | ||
210 | unsigned i; | ||
211 | size_t size; | ||
212 | |||
213 | for (size = i = 0; i < d->sglen; i++) | ||
214 | size += omap_dma_sg_size(&d->sg[i]); | ||
215 | |||
216 | return size * es_bytes[d->es]; | ||
217 | } | ||
218 | |||
219 | static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) | ||
220 | { | ||
221 | unsigned i; | ||
222 | size_t size, es_size = es_bytes[d->es]; | ||
223 | |||
224 | for (size = i = 0; i < d->sglen; i++) { | ||
225 | size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; | ||
226 | |||
227 | if (size) | ||
228 | size += this_size; | ||
229 | else if (addr >= d->sg[i].addr && | ||
230 | addr < d->sg[i].addr + this_size) | ||
231 | size += d->sg[i].addr + this_size - addr; | ||
232 | } | ||
233 | return size; | ||
234 | } | ||
235 | |||
236 | static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | ||
237 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
238 | { | ||
239 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
240 | struct virt_dma_desc *vd; | ||
241 | enum dma_status ret; | ||
242 | unsigned long flags; | ||
243 | |||
244 | ret = dma_cookie_status(chan, cookie, txstate); | ||
245 | if (ret == DMA_SUCCESS || !txstate) | ||
246 | return ret; | ||
247 | |||
248 | spin_lock_irqsave(&c->vc.lock, flags); | ||
249 | vd = vchan_find_desc(&c->vc, cookie); | ||
250 | if (vd) { | ||
251 | txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); | ||
252 | } else if (c->desc && c->desc->vd.tx.cookie == cookie) { | ||
253 | struct omap_desc *d = c->desc; | ||
254 | dma_addr_t pos; | ||
255 | |||
256 | if (d->dir == DMA_MEM_TO_DEV) | ||
257 | pos = omap_get_dma_src_pos(c->dma_ch); | ||
258 | else if (d->dir == DMA_DEV_TO_MEM) | ||
259 | pos = omap_get_dma_dst_pos(c->dma_ch); | ||
260 | else | ||
261 | pos = 0; | ||
262 | |||
263 | txstate->residue = omap_dma_desc_size_pos(d, pos); | ||
264 | } else { | ||
265 | txstate->residue = 0; | ||
266 | } | ||
267 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
268 | |||
269 | return ret; | ||
270 | } | ||
271 | |||
272 | static void omap_dma_issue_pending(struct dma_chan *chan) | ||
273 | { | ||
274 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
275 | unsigned long flags; | ||
276 | |||
277 | spin_lock_irqsave(&c->vc.lock, flags); | ||
278 | if (vchan_issue_pending(&c->vc) && !c->desc) { | ||
279 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); | ||
280 | spin_lock(&d->lock); | ||
281 | if (list_empty(&c->node)) | ||
282 | list_add_tail(&c->node, &d->pending); | ||
283 | spin_unlock(&d->lock); | ||
284 | tasklet_schedule(&d->task); | ||
285 | } | ||
286 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
287 | } | ||
288 | |||
289 | static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | ||
290 | struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, | ||
291 | enum dma_transfer_direction dir, unsigned long tx_flags, void *context) | ||
292 | { | ||
293 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
294 | enum dma_slave_buswidth dev_width; | ||
295 | struct scatterlist *sgent; | ||
296 | struct omap_desc *d; | ||
297 | dma_addr_t dev_addr; | ||
298 | unsigned i, j = 0, es, en, frame_bytes, sync_type; | ||
299 | u32 burst; | ||
300 | |||
301 | if (dir == DMA_DEV_TO_MEM) { | ||
302 | dev_addr = c->cfg.src_addr; | ||
303 | dev_width = c->cfg.src_addr_width; | ||
304 | burst = c->cfg.src_maxburst; | ||
305 | sync_type = OMAP_DMA_SRC_SYNC; | ||
306 | } else if (dir == DMA_MEM_TO_DEV) { | ||
307 | dev_addr = c->cfg.dst_addr; | ||
308 | dev_width = c->cfg.dst_addr_width; | ||
309 | burst = c->cfg.dst_maxburst; | ||
310 | sync_type = OMAP_DMA_DST_SYNC; | ||
311 | } else { | ||
312 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | ||
313 | return NULL; | ||
314 | } | ||
315 | |||
316 | /* Bus width translates to the element size (ES) */ | ||
317 | switch (dev_width) { | ||
318 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
319 | es = OMAP_DMA_DATA_TYPE_S8; | ||
320 | break; | ||
321 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
322 | es = OMAP_DMA_DATA_TYPE_S16; | ||
323 | break; | ||
324 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
325 | es = OMAP_DMA_DATA_TYPE_S32; | ||
326 | break; | ||
327 | default: /* not reached */ | ||
328 | return NULL; | ||
329 | } | ||
330 | |||
331 | /* Now allocate and setup the descriptor. */ | ||
332 | d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); | ||
333 | if (!d) | ||
334 | return NULL; | ||
335 | |||
336 | d->dir = dir; | ||
337 | d->dev_addr = dev_addr; | ||
338 | d->es = es; | ||
339 | d->sync_mode = OMAP_DMA_SYNC_FRAME; | ||
340 | d->sync_type = sync_type; | ||
341 | d->periph_port = OMAP_DMA_PORT_TIPB; | ||
342 | |||
343 | /* | ||
344 | * Build our scatterlist entries: each contains the address, | ||
345 | * the number of elements (EN) in each frame, and the number of | ||
346 | * frames (FN). Number of bytes for this entry = ES * EN * FN. | ||
347 | * | ||
348 | * Burst size translates to number of elements with frame sync. | ||
349 | * Note: DMA engine defines burst to be the number of dev-width | ||
350 | * transfers. | ||
351 | */ | ||
352 | en = burst; | ||
353 | frame_bytes = es_bytes[es] * en; | ||
354 | for_each_sg(sgl, sgent, sglen, i) { | ||
355 | d->sg[j].addr = sg_dma_address(sgent); | ||
356 | d->sg[j].en = en; | ||
357 | d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; | ||
358 | j++; | ||
359 | } | ||
360 | |||
361 | d->sglen = j; | ||
362 | |||
363 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); | ||
364 | } | ||
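For reference, the EN/FN bookkeeping described in the comment above works out as follows; the standalone sketch below uses purely hypothetical numbers (4-byte elements, a burst of 16, one 4 KiB scatterlist segment), not values taken from a real transfer:

#include <stdio.h>

/* Worked example of "bytes for this entry = ES * EN * FN". */
int main(void)
{
	unsigned es_bytes = 4;             /* element size (ES): S32 -> 4 bytes */
	unsigned en = 16;                  /* elements per frame (EN) = burst */
	unsigned seg_len = 4096;           /* sg_dma_len() of the segment */

	unsigned frame_bytes = es_bytes * en;  /* 64 bytes per frame */
	unsigned fn = seg_len / frame_bytes;   /* 64 frames (FN) */

	printf("frame_bytes=%u fn=%u total=%u\n",
	       frame_bytes, fn, es_bytes * en * fn);  /* total = 4096 */
	return 0;
}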
365 | |||
366 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | ||
367 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
368 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags, | ||
369 | void *context) | ||
370 | { | ||
371 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
372 | enum dma_slave_buswidth dev_width; | ||
373 | struct omap_desc *d; | ||
374 | dma_addr_t dev_addr; | ||
375 | unsigned es, sync_type; | ||
376 | u32 burst; | ||
377 | |||
378 | if (dir == DMA_DEV_TO_MEM) { | ||
379 | dev_addr = c->cfg.src_addr; | ||
380 | dev_width = c->cfg.src_addr_width; | ||
381 | burst = c->cfg.src_maxburst; | ||
382 | sync_type = OMAP_DMA_SRC_SYNC; | ||
383 | } else if (dir == DMA_MEM_TO_DEV) { | ||
384 | dev_addr = c->cfg.dst_addr; | ||
385 | dev_width = c->cfg.dst_addr_width; | ||
386 | burst = c->cfg.dst_maxburst; | ||
387 | sync_type = OMAP_DMA_DST_SYNC; | ||
388 | } else { | ||
389 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | ||
390 | return NULL; | ||
391 | } | ||
392 | |||
393 | /* Bus width translates to the element size (ES) */ | ||
394 | switch (dev_width) { | ||
395 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
396 | es = OMAP_DMA_DATA_TYPE_S8; | ||
397 | break; | ||
398 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
399 | es = OMAP_DMA_DATA_TYPE_S16; | ||
400 | break; | ||
401 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
402 | es = OMAP_DMA_DATA_TYPE_S32; | ||
403 | break; | ||
404 | default: /* not reached */ | ||
405 | return NULL; | ||
406 | } | ||
407 | |||
408 | /* Now allocate and setup the descriptor. */ | ||
409 | d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); | ||
410 | if (!d) | ||
411 | return NULL; | ||
412 | |||
413 | d->dir = dir; | ||
414 | d->dev_addr = dev_addr; | ||
415 | d->fi = burst; | ||
416 | d->es = es; | ||
417 | if (burst) | ||
418 | d->sync_mode = OMAP_DMA_SYNC_PACKET; | ||
419 | else | ||
420 | d->sync_mode = OMAP_DMA_SYNC_ELEMENT; | ||
421 | d->sync_type = sync_type; | ||
422 | d->periph_port = OMAP_DMA_PORT_MPUI; | ||
423 | d->sg[0].addr = buf_addr; | ||
424 | d->sg[0].en = period_len / es_bytes[es]; | ||
425 | d->sg[0].fn = buf_len / period_len; | ||
426 | d->sglen = 1; | ||
427 | |||
428 | if (!c->cyclic) { | ||
429 | c->cyclic = true; | ||
430 | omap_dma_link_lch(c->dma_ch, c->dma_ch); | ||
431 | |||
432 | if (flags & DMA_PREP_INTERRUPT) | ||
433 | omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); | ||
434 | |||
435 | omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); | ||
436 | } | ||
437 | |||
438 | if (dma_omap2plus()) { | ||
439 | omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); | ||
440 | omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); | ||
441 | } | ||
442 | |||
443 | return vchan_tx_prep(&c->vc, &d->vd, flags); | ||
444 | } | ||
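The cyclic case above maps one hardware frame to one period of the ring buffer. With a hypothetical 8 KiB buffer, 1 KiB periods and 4-byte (S32) elements, d->sg[0] comes out as en = 1024 / 4 = 256 elements per frame and fn = 8192 / 1024 = 8 frames, i.e. one frame per period.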
445 | |||
446 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) | ||
447 | { | ||
448 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | ||
449 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
450 | return -EINVAL; | ||
451 | |||
452 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | static int omap_dma_terminate_all(struct omap_chan *c) | ||
458 | { | ||
459 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); | ||
460 | unsigned long flags; | ||
461 | LIST_HEAD(head); | ||
462 | |||
463 | spin_lock_irqsave(&c->vc.lock, flags); | ||
464 | |||
465 | /* Prevent this channel being scheduled */ | ||
466 | spin_lock(&d->lock); | ||
467 | list_del_init(&c->node); | ||
468 | spin_unlock(&d->lock); | ||
469 | |||
470 | /* | ||
471 | * Stop DMA activity: we assume the callback will not be called | ||
472 | * after omap_stop_dma() returns (even if it does, it will see | ||
473 | * c->desc is NULL and exit.) | ||
474 | */ | ||
475 | if (c->desc) { | ||
476 | c->desc = NULL; | ||
477 | /* Avoid stopping the dma twice */ | ||
478 | if (!c->paused) | ||
479 | omap_stop_dma(c->dma_ch); | ||
480 | } | ||
481 | |||
482 | if (c->cyclic) { | ||
483 | c->cyclic = false; | ||
484 | c->paused = false; | ||
485 | omap_dma_unlink_lch(c->dma_ch, c->dma_ch); | ||
486 | } | ||
487 | |||
488 | vchan_get_all_descriptors(&c->vc, &head); | ||
489 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
490 | vchan_dma_desc_free_list(&c->vc, &head); | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static int omap_dma_pause(struct omap_chan *c) | ||
496 | { | ||
497 | /* Pause/Resume only allowed with cyclic mode */ | ||
498 | if (!c->cyclic) | ||
499 | return -EINVAL; | ||
500 | |||
501 | if (!c->paused) { | ||
502 | omap_stop_dma(c->dma_ch); | ||
503 | c->paused = true; | ||
504 | } | ||
505 | |||
506 | return 0; | ||
507 | } | ||
508 | |||
509 | static int omap_dma_resume(struct omap_chan *c) | ||
510 | { | ||
511 | /* Pause/Resume only allowed with cyclic mode */ | ||
512 | if (!c->cyclic) | ||
513 | return -EINVAL; | ||
514 | |||
515 | if (c->paused) { | ||
516 | omap_start_dma(c->dma_ch); | ||
517 | c->paused = false; | ||
518 | } | ||
519 | |||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
524 | unsigned long arg) | ||
525 | { | ||
526 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
527 | int ret; | ||
528 | |||
529 | switch (cmd) { | ||
530 | case DMA_SLAVE_CONFIG: | ||
531 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); | ||
532 | break; | ||
533 | |||
534 | case DMA_TERMINATE_ALL: | ||
535 | ret = omap_dma_terminate_all(c); | ||
536 | break; | ||
537 | |||
538 | case DMA_PAUSE: | ||
539 | ret = omap_dma_pause(c); | ||
540 | break; | ||
541 | |||
542 | case DMA_RESUME: | ||
543 | ret = omap_dma_resume(c); | ||
544 | break; | ||
545 | |||
546 | default: | ||
547 | ret = -ENXIO; | ||
548 | break; | ||
549 | } | ||
550 | |||
551 | return ret; | ||
552 | } | ||
553 | |||
554 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) | ||
555 | { | ||
556 | struct omap_chan *c; | ||
557 | |||
558 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
559 | if (!c) | ||
560 | return -ENOMEM; | ||
561 | |||
562 | c->dma_sig = dma_sig; | ||
563 | c->vc.desc_free = omap_dma_desc_free; | ||
564 | vchan_init(&c->vc, &od->ddev); | ||
565 | INIT_LIST_HEAD(&c->node); | ||
566 | |||
567 | od->ddev.chancnt++; | ||
568 | |||
569 | return 0; | ||
570 | } | ||
571 | |||
572 | static void omap_dma_free(struct omap_dmadev *od) | ||
573 | { | ||
574 | tasklet_kill(&od->task); | ||
575 | while (!list_empty(&od->ddev.channels)) { | ||
576 | struct omap_chan *c = list_first_entry(&od->ddev.channels, | ||
577 | struct omap_chan, vc.chan.device_node); | ||
578 | |||
579 | list_del(&c->vc.chan.device_node); | ||
580 | tasklet_kill(&c->vc.task); | ||
581 | kfree(c); | ||
582 | } | ||
583 | kfree(od); | ||
584 | } | ||
585 | |||
586 | static int omap_dma_probe(struct platform_device *pdev) | ||
587 | { | ||
588 | struct omap_dmadev *od; | ||
589 | int rc, i; | ||
590 | |||
591 | od = kzalloc(sizeof(*od), GFP_KERNEL); | ||
592 | if (!od) | ||
593 | return -ENOMEM; | ||
594 | |||
595 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | ||
596 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); | ||
597 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; | ||
598 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; | ||
599 | od->ddev.device_tx_status = omap_dma_tx_status; | ||
600 | od->ddev.device_issue_pending = omap_dma_issue_pending; | ||
601 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; | ||
602 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; | ||
603 | od->ddev.device_control = omap_dma_control; | ||
604 | od->ddev.dev = &pdev->dev; | ||
605 | INIT_LIST_HEAD(&od->ddev.channels); | ||
606 | INIT_LIST_HEAD(&od->pending); | ||
607 | spin_lock_init(&od->lock); | ||
608 | |||
609 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); | ||
610 | |||
611 | for (i = 0; i < 127; i++) { | ||
612 | rc = omap_dma_chan_init(od, i); | ||
613 | if (rc) { | ||
614 | omap_dma_free(od); | ||
615 | return rc; | ||
616 | } | ||
617 | } | ||
618 | |||
619 | rc = dma_async_device_register(&od->ddev); | ||
620 | if (rc) { | ||
621 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", | ||
622 | rc); | ||
623 | omap_dma_free(od); | ||
624 | } else { | ||
625 | platform_set_drvdata(pdev, od); | ||
626 | } | ||
627 | |||
628 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); | ||
629 | |||
630 | return rc; | ||
631 | } | ||
632 | |||
633 | static int omap_dma_remove(struct platform_device *pdev) | ||
634 | { | ||
635 | struct omap_dmadev *od = platform_get_drvdata(pdev); | ||
636 | |||
637 | dma_async_device_unregister(&od->ddev); | ||
638 | omap_dma_free(od); | ||
639 | |||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | static struct platform_driver omap_dma_driver = { | ||
644 | .probe = omap_dma_probe, | ||
645 | .remove = omap_dma_remove, | ||
646 | .driver = { | ||
647 | .name = "omap-dma-engine", | ||
648 | .owner = THIS_MODULE, | ||
649 | }, | ||
650 | }; | ||
651 | |||
652 | bool omap_dma_filter_fn(struct dma_chan *chan, void *param) | ||
653 | { | ||
654 | if (chan->device->dev->driver == &omap_dma_driver.driver) { | ||
655 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
656 | unsigned req = *(unsigned *)param; | ||
657 | |||
658 | return req == c->dma_sig; | ||
659 | } | ||
660 | return false; | ||
661 | } | ||
662 | EXPORT_SYMBOL_GPL(omap_dma_filter_fn); | ||
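A slave driver would normally hand omap_dma_filter_fn to dma_request_channel() to pick up the channel bound to its request line. A minimal sketch, assuming the standard dmaengine channel-request pattern; the request-line number and function name are hypothetical:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

#define EXAMPLE_DMA_REQ	7	/* hypothetical OMAP DMA request line */

static struct dma_chan *example_request_chan(void)
{
	dma_cap_mask_t mask;
	unsigned sig = EXAMPLE_DMA_REQ;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* omap_dma_filter_fn() matches *param against the channel's dma_sig */
	return dma_request_channel(mask, omap_dma_filter_fn, &sig);
}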
663 | |||
664 | static struct platform_device *pdev; | ||
665 | |||
666 | static const struct platform_device_info omap_dma_dev_info = { | ||
667 | .name = "omap-dma-engine", | ||
668 | .id = -1, | ||
669 | .dma_mask = DMA_BIT_MASK(32), | ||
670 | }; | ||
671 | |||
672 | static int omap_dma_init(void) | ||
673 | { | ||
674 | int rc = platform_driver_register(&omap_dma_driver); | ||
675 | |||
676 | if (rc == 0) { | ||
677 | pdev = platform_device_register_full(&omap_dma_dev_info); | ||
678 | if (IS_ERR(pdev)) { | ||
679 | platform_driver_unregister(&omap_dma_driver); | ||
680 | rc = PTR_ERR(pdev); | ||
681 | } | ||
682 | } | ||
683 | return rc; | ||
684 | } | ||
685 | subsys_initcall(omap_dma_init); | ||
686 | |||
687 | static void __exit omap_dma_exit(void) | ||
688 | { | ||
689 | platform_device_unregister(pdev); | ||
690 | platform_driver_unregister(&omap_dma_driver); | ||
691 | } | ||
692 | module_exit(omap_dma_exit); | ||
693 | |||
694 | MODULE_AUTHOR("Russell King"); | ||
695 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 3f2617255ef..1ac8d4b580b 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Topcliff PCH DMA controller driver | 2 | * Topcliff PCH DMA controller driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. | 4 | * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -25,8 +25,6 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/pch_dma.h> | 26 | #include <linux/pch_dma.h> |
27 | 27 | ||
28 | #include "dmaengine.h" | ||
29 | |||
30 | #define DRV_NAME "pch-dma" | 28 | #define DRV_NAME "pch-dma" |
31 | 29 | ||
32 | #define DMA_CTL0_DISABLE 0x0 | 30 | #define DMA_CTL0_DISABLE 0x0 |
@@ -62,7 +60,7 @@ | |||
62 | #define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 | 60 | #define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 |
63 | #define DMA_DESC_FOLLOW_WITH_IRQ 0x3 | 61 | #define DMA_DESC_FOLLOW_WITH_IRQ 0x3 |
64 | 62 | ||
65 | #define MAX_CHAN_NR 12 | 63 | #define MAX_CHAN_NR 8 |
66 | 64 | ||
67 | #define DMA_MASK_CTL0_MODE 0x33333333 | 65 | #define DMA_MASK_CTL0_MODE 0x33333333 |
68 | #define DMA_MASK_CTL2_MODE 0x00003333 | 66 | #define DMA_MASK_CTL2_MODE 0x00003333 |
@@ -101,12 +99,13 @@ struct pch_dma_desc { | |||
101 | struct pch_dma_chan { | 99 | struct pch_dma_chan { |
102 | struct dma_chan chan; | 100 | struct dma_chan chan; |
103 | void __iomem *membase; | 101 | void __iomem *membase; |
104 | enum dma_transfer_direction dir; | 102 | enum dma_data_direction dir; |
105 | struct tasklet_struct tasklet; | 103 | struct tasklet_struct tasklet; |
106 | unsigned long err_status; | 104 | unsigned long err_status; |
107 | 105 | ||
108 | spinlock_t lock; | 106 | spinlock_t lock; |
109 | 107 | ||
108 | dma_cookie_t completed_cookie; | ||
110 | struct list_head active_list; | 109 | struct list_head active_list; |
111 | struct list_head queue; | 110 | struct list_head queue; |
112 | struct list_head free_list; | 111 | struct list_head free_list; |
@@ -225,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
225 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | 224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << |
226 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | 225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); |
227 | val &= mask_mode; | 226 | val &= mask_mode; |
228 | if (pd_chan->dir == DMA_MEM_TO_DEV) | 227 | if (pd_chan->dir == DMA_TO_DEVICE) |
229 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
230 | DMA_CTL0_DIR_SHIFT_BITS); | 229 | DMA_CTL0_DIR_SHIFT_BITS); |
231 | else | 230 | else |
@@ -243,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
243 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | 242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << |
244 | (DMA_CTL0_BITS_PER_CH * ch)); | 243 | (DMA_CTL0_BITS_PER_CH * ch)); |
245 | val &= mask_mode; | 244 | val &= mask_mode; |
246 | if (pd_chan->dir == DMA_MEM_TO_DEV) | 245 | if (pd_chan->dir == DMA_TO_DEVICE) |
247 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
248 | DMA_CTL0_DIR_SHIFT_BITS); | 247 | DMA_CTL0_DIR_SHIFT_BITS); |
249 | else | 248 | else |
@@ -417,6 +416,20 @@ static void pdc_advance_work(struct pch_dma_chan *pd_chan) | |||
417 | } | 416 | } |
418 | } | 417 | } |
419 | 418 | ||
419 | static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan, | ||
420 | struct pch_dma_desc *desc) | ||
421 | { | ||
422 | dma_cookie_t cookie = pd_chan->chan.cookie; | ||
423 | |||
424 | if (++cookie < 0) | ||
425 | cookie = 1; | ||
426 | |||
427 | pd_chan->chan.cookie = cookie; | ||
428 | desc->txd.cookie = cookie; | ||
429 | |||
430 | return cookie; | ||
431 | } | ||
432 | |||
420 | static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | 433 | static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) |
421 | { | 434 | { |
422 | struct pch_dma_desc *desc = to_pd_desc(txd); | 435 | struct pch_dma_desc *desc = to_pd_desc(txd); |
@@ -424,7 +437,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
424 | dma_cookie_t cookie; | 437 | dma_cookie_t cookie; |
425 | 438 | ||
426 | spin_lock(&pd_chan->lock); | 439 | spin_lock(&pd_chan->lock); |
427 | cookie = dma_cookie_assign(txd); | 440 | cookie = pdc_assign_cookie(pd_chan, desc); |
428 | 441 | ||
429 | if (list_empty(&pd_chan->active_list)) { | 442 | if (list_empty(&pd_chan->active_list)) { |
430 | list_add_tail(&desc->desc_node, &pd_chan->active_list); | 443 | list_add_tail(&desc->desc_node, &pd_chan->active_list); |
@@ -531,7 +544,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
531 | spin_lock_irq(&pd_chan->lock); | 544 | spin_lock_irq(&pd_chan->lock); |
532 | list_splice(&tmp_list, &pd_chan->free_list); | 545 | list_splice(&tmp_list, &pd_chan->free_list); |
533 | pd_chan->descs_allocated = i; | 546 | pd_chan->descs_allocated = i; |
534 | dma_cookie_init(chan); | 547 | pd_chan->completed_cookie = chan->cookie = 1; |
535 | spin_unlock_irq(&pd_chan->lock); | 548 | spin_unlock_irq(&pd_chan->lock); |
536 | 549 | ||
537 | pdc_enable_irq(chan, 1); | 550 | pdc_enable_irq(chan, 1); |
@@ -565,12 +578,19 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
565 | struct dma_tx_state *txstate) | 578 | struct dma_tx_state *txstate) |
566 | { | 579 | { |
567 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 580 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
568 | enum dma_status ret; | 581 | dma_cookie_t last_used; |
582 | dma_cookie_t last_completed; | ||
583 | int ret; | ||
569 | 584 | ||
570 | spin_lock_irq(&pd_chan->lock); | 585 | spin_lock_irq(&pd_chan->lock); |
571 | ret = dma_cookie_status(chan, cookie, txstate); | 586 | last_completed = pd_chan->completed_cookie; |
587 | last_used = chan->cookie; | ||
572 | spin_unlock_irq(&pd_chan->lock); | 588 | spin_unlock_irq(&pd_chan->lock); |
573 | 589 | ||
590 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
591 | |||
592 | dma_set_tx_state(txstate, last_completed, last_used, 0); | ||
593 | |||
574 | return ret; | 594 | return ret; |
575 | } | 595 | } |
576 | 596 | ||
@@ -587,8 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
587 | 607 | ||
588 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | 608 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, |
589 | struct scatterlist *sgl, unsigned int sg_len, | 609 | struct scatterlist *sgl, unsigned int sg_len, |
590 | enum dma_transfer_direction direction, unsigned long flags, | 610 | enum dma_data_direction direction, unsigned long flags) |
591 | void *context) | ||
592 | { | 611 | { |
593 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 612 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
594 | struct pch_dma_slave *pd_slave = chan->private; | 613 | struct pch_dma_slave *pd_slave = chan->private; |
@@ -604,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
604 | return NULL; | 623 | return NULL; |
605 | } | 624 | } |
606 | 625 | ||
607 | if (direction == DMA_DEV_TO_MEM) | 626 | if (direction == DMA_FROM_DEVICE) |
608 | reg = pd_slave->rx_reg; | 627 | reg = pd_slave->rx_reg; |
609 | else if (direction == DMA_MEM_TO_DEV) | 628 | else if (direction == DMA_TO_DEVICE) |
610 | reg = pd_slave->tx_reg; | 629 | reg = pd_slave->tx_reg; |
611 | else | 630 | else |
612 | return NULL; | 631 | return NULL; |
@@ -621,7 +640,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
621 | goto err_desc_get; | 640 | goto err_desc_get; |
622 | 641 | ||
623 | desc->regs.dev_addr = reg; | 642 | desc->regs.dev_addr = reg; |
624 | desc->regs.mem_addr = sg_dma_address(sg); | 643 | desc->regs.mem_addr = sg_phys(sg); |
625 | desc->regs.size = sg_dma_len(sg); | 644 | desc->regs.size = sg_dma_len(sg); |
626 | desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; | 645 | desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; |
627 | 646 | ||
@@ -843,7 +862,7 @@ static int pch_dma_resume(struct pci_dev *pdev) | |||
843 | } | 862 | } |
844 | #endif | 863 | #endif |
845 | 864 | ||
846 | static int pch_dma_probe(struct pci_dev *pdev, | 865 | static int __devinit pch_dma_probe(struct pci_dev *pdev, |
847 | const struct pci_device_id *id) | 866 | const struct pci_device_id *id) |
848 | { | 867 | { |
849 | struct pch_dma *pd; | 868 | struct pch_dma *pd; |
@@ -853,7 +872,8 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
853 | int i; | 872 | int i; |
854 | 873 | ||
855 | nr_channels = id->driver_data; | 874 | nr_channels = id->driver_data; |
856 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | 875 | pd = kzalloc(sizeof(struct pch_dma)+ |
876 | sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL); | ||
857 | if (!pd) | 877 | if (!pd) |
858 | return -ENOMEM; | 878 | return -ENOMEM; |
859 | 879 | ||
@@ -906,6 +926,7 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
906 | } | 926 | } |
907 | 927 | ||
908 | pd->dma.dev = &pdev->dev; | 928 | pd->dma.dev = &pdev->dev; |
929 | pd->dma.chancnt = nr_channels; | ||
909 | 930 | ||
910 | INIT_LIST_HEAD(&pd->dma.channels); | 931 | INIT_LIST_HEAD(&pd->dma.channels); |
911 | 932 | ||
@@ -913,7 +934,8 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
913 | struct pch_dma_chan *pd_chan = &pd->channels[i]; | 934 | struct pch_dma_chan *pd_chan = &pd->channels[i]; |
914 | 935 | ||
915 | pd_chan->chan.device = &pd->dma; | 936 | pd_chan->chan.device = &pd->dma; |
916 | dma_cookie_init(&pd_chan->chan); | 937 | pd_chan->chan.cookie = 1; |
938 | pd_chan->chan.chan_id = i; | ||
917 | 939 | ||
918 | pd_chan->membase = ®s->desc[i]; | 940 | pd_chan->membase = ®s->desc[i]; |
919 | 941 | ||
@@ -961,7 +983,7 @@ err_free_mem: | |||
961 | return err; | 983 | return err; |
962 | } | 984 | } |
963 | 985 | ||
964 | static void pch_dma_remove(struct pci_dev *pdev) | 986 | static void __devexit pch_dma_remove(struct pci_dev *pdev) |
965 | { | 987 | { |
966 | struct pch_dma *pd = pci_get_drvdata(pdev); | 988 | struct pch_dma *pd = pci_get_drvdata(pdev); |
967 | struct pch_dma_chan *pd_chan; | 989 | struct pch_dma_chan *pd_chan; |
@@ -999,8 +1021,6 @@ static void pch_dma_remove(struct pci_dev *pdev) | |||
999 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E | 1021 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E |
1000 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 | 1022 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 |
1001 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B | 1023 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B |
1002 | #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 | ||
1003 | #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 | ||
1004 | 1024 | ||
1005 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | 1025 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { |
1006 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | 1026 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
@@ -1013,8 +1033,6 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | |||
1013 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ | 1033 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ |
1014 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ | 1034 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ |
1015 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ | 1035 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ |
1016 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */ | ||
1017 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */ | ||
1018 | { 0, }, | 1036 | { 0, }, |
1019 | }; | 1037 | }; |
1020 | 1038 | ||
@@ -1022,7 +1040,7 @@ static struct pci_driver pch_dma_driver = { | |||
1022 | .name = DRV_NAME, | 1040 | .name = DRV_NAME, |
1023 | .id_table = pch_dma_id_table, | 1041 | .id_table = pch_dma_id_table, |
1024 | .probe = pch_dma_probe, | 1042 | .probe = pch_dma_probe, |
1025 | .remove = pch_dma_remove, | 1043 | .remove = __devexit_p(pch_dma_remove), |
1026 | #ifdef CONFIG_PM | 1044 | #ifdef CONFIG_PM |
1027 | .suspend = pch_dma_suspend, | 1045 | .suspend = pch_dma_suspend, |
1028 | .resume = pch_dma_resume, | 1046 | .resume = pch_dma_resume, |
@@ -1042,7 +1060,7 @@ static void __exit pch_dma_exit(void) | |||
1042 | module_init(pch_dma_init); | 1060 | module_init(pch_dma_init); |
1043 | module_exit(pch_dma_exit); | 1061 | module_exit(pch_dma_exit); |
1044 | 1062 | ||
1045 | MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " | 1063 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " |
1046 | "DMA controller driver"); | 1064 | "DMA controller driver"); |
1047 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 1065 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
1048 | MODULE_LICENSE("GPL v2"); | 1066 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 80680eee017..00eee59e8b3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* linux/drivers/dma/pl330.c |
2 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
3 | * http://www.samsung.com | ||
4 | * | 2 | * |
5 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. | 3 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. |
6 | * Jaswinder Singh <jassi.brar@samsung.com> | 4 | * Jaswinder Singh <jassi.brar@samsung.com> |
@@ -11,517 +9,22 @@ | |||
11 | * (at your option) any later version. | 9 | * (at your option) any later version. |
12 | */ | 10 | */ |
13 | 11 | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/io.h> | 12 | #include <linux/io.h> |
16 | #include <linux/init.h> | 13 | #include <linux/init.h> |
17 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
18 | #include <linux/module.h> | 15 | #include <linux/module.h> |
19 | #include <linux/string.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/dmaengine.h> | 16 | #include <linux/dmaengine.h> |
17 | #include <linux/interrupt.h> | ||
24 | #include <linux/amba/bus.h> | 18 | #include <linux/amba/bus.h> |
25 | #include <linux/amba/pl330.h> | 19 | #include <linux/amba/pl330.h> |
26 | #include <linux/scatterlist.h> | ||
27 | #include <linux/of.h> | ||
28 | |||
29 | #include "dmaengine.h" | ||
30 | #define PL330_MAX_CHAN 8 | ||
31 | #define PL330_MAX_IRQS 32 | ||
32 | #define PL330_MAX_PERI 32 | ||
33 | |||
34 | enum pl330_srccachectrl { | ||
35 | SCCTRL0, /* Noncacheable and nonbufferable */ | ||
36 | SCCTRL1, /* Bufferable only */ | ||
37 | SCCTRL2, /* Cacheable, but do not allocate */ | ||
38 | SCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
39 | SINVALID1, | ||
40 | SINVALID2, | ||
41 | SCCTRL6, /* Cacheable write-through, allocate on reads only */ | ||
42 | SCCTRL7, /* Cacheable write-back, allocate on reads only */ | ||
43 | }; | ||
44 | |||
45 | enum pl330_dstcachectrl { | ||
46 | DCCTRL0, /* Noncacheable and nonbufferable */ | ||
47 | DCCTRL1, /* Bufferable only */ | ||
48 | DCCTRL2, /* Cacheable, but do not allocate */ | ||
49 | DCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
50 | DINVALID1, /* AWCACHE = 0x1000 */ | ||
51 | DINVALID2, | ||
52 | DCCTRL6, /* Cacheable write-through, allocate on writes only */ | ||
53 | DCCTRL7, /* Cacheable write-back, allocate on writes only */ | ||
54 | }; | ||
55 | |||
56 | enum pl330_byteswap { | ||
57 | SWAP_NO, | ||
58 | SWAP_2, | ||
59 | SWAP_4, | ||
60 | SWAP_8, | ||
61 | SWAP_16, | ||
62 | }; | ||
63 | |||
64 | enum pl330_reqtype { | ||
65 | MEMTOMEM, | ||
66 | MEMTODEV, | ||
67 | DEVTOMEM, | ||
68 | DEVTODEV, | ||
69 | }; | ||
70 | |||
71 | /* Register and Bit field Definitions */ | ||
72 | #define DS 0x0 | ||
73 | #define DS_ST_STOP 0x0 | ||
74 | #define DS_ST_EXEC 0x1 | ||
75 | #define DS_ST_CMISS 0x2 | ||
76 | #define DS_ST_UPDTPC 0x3 | ||
77 | #define DS_ST_WFE 0x4 | ||
78 | #define DS_ST_ATBRR 0x5 | ||
79 | #define DS_ST_QBUSY 0x6 | ||
80 | #define DS_ST_WFP 0x7 | ||
81 | #define DS_ST_KILL 0x8 | ||
82 | #define DS_ST_CMPLT 0x9 | ||
83 | #define DS_ST_FLTCMP 0xe | ||
84 | #define DS_ST_FAULT 0xf | ||
85 | |||
86 | #define DPC 0x4 | ||
87 | #define INTEN 0x20 | ||
88 | #define ES 0x24 | ||
89 | #define INTSTATUS 0x28 | ||
90 | #define INTCLR 0x2c | ||
91 | #define FSM 0x30 | ||
92 | #define FSC 0x34 | ||
93 | #define FTM 0x38 | ||
94 | |||
95 | #define _FTC 0x40 | ||
96 | #define FTC(n) (_FTC + (n)*0x4) | ||
97 | |||
98 | #define _CS 0x100 | ||
99 | #define CS(n) (_CS + (n)*0x8) | ||
100 | #define CS_CNS (1 << 21) | ||
101 | |||
102 | #define _CPC 0x104 | ||
103 | #define CPC(n) (_CPC + (n)*0x8) | ||
104 | |||
105 | #define _SA 0x400 | ||
106 | #define SA(n) (_SA + (n)*0x20) | ||
107 | |||
108 | #define _DA 0x404 | ||
109 | #define DA(n) (_DA + (n)*0x20) | ||
110 | |||
111 | #define _CC 0x408 | ||
112 | #define CC(n) (_CC + (n)*0x20) | ||
113 | |||
114 | #define CC_SRCINC (1 << 0) | ||
115 | #define CC_DSTINC (1 << 14) | ||
116 | #define CC_SRCPRI (1 << 8) | ||
117 | #define CC_DSTPRI (1 << 22) | ||
118 | #define CC_SRCNS (1 << 9) | ||
119 | #define CC_DSTNS (1 << 23) | ||
120 | #define CC_SRCIA (1 << 10) | ||
121 | #define CC_DSTIA (1 << 24) | ||
122 | #define CC_SRCBRSTLEN_SHFT 4 | ||
123 | #define CC_DSTBRSTLEN_SHFT 18 | ||
124 | #define CC_SRCBRSTSIZE_SHFT 1 | ||
125 | #define CC_DSTBRSTSIZE_SHFT 15 | ||
126 | #define CC_SRCCCTRL_SHFT 11 | ||
127 | #define CC_SRCCCTRL_MASK 0x7 | ||
128 | #define CC_DSTCCTRL_SHFT 25 | ||
129 | #define CC_DRCCCTRL_MASK 0x7 | ||
130 | #define CC_SWAP_SHFT 28 | ||
131 | |||
132 | #define _LC0 0x40c | ||
133 | #define LC0(n) (_LC0 + (n)*0x20) | ||
134 | |||
135 | #define _LC1 0x410 | ||
136 | #define LC1(n) (_LC1 + (n)*0x20) | ||
137 | |||
138 | #define DBGSTATUS 0xd00 | ||
139 | #define DBG_BUSY (1 << 0) | ||
140 | |||
141 | #define DBGCMD 0xd04 | ||
142 | #define DBGINST0 0xd08 | ||
143 | #define DBGINST1 0xd0c | ||
144 | |||
145 | #define CR0 0xe00 | ||
146 | #define CR1 0xe04 | ||
147 | #define CR2 0xe08 | ||
148 | #define CR3 0xe0c | ||
149 | #define CR4 0xe10 | ||
150 | #define CRD 0xe14 | ||
151 | |||
152 | #define PERIPH_ID 0xfe0 | ||
153 | #define PERIPH_REV_SHIFT 20 | ||
154 | #define PERIPH_REV_MASK 0xf | ||
155 | #define PERIPH_REV_R0P0 0 | ||
156 | #define PERIPH_REV_R1P0 1 | ||
157 | #define PERIPH_REV_R1P1 2 | ||
158 | #define PCELL_ID 0xff0 | ||
159 | |||
160 | #define CR0_PERIPH_REQ_SET (1 << 0) | ||
161 | #define CR0_BOOT_EN_SET (1 << 1) | ||
162 | #define CR0_BOOT_MAN_NS (1 << 2) | ||
163 | #define CR0_NUM_CHANS_SHIFT 4 | ||
164 | #define CR0_NUM_CHANS_MASK 0x7 | ||
165 | #define CR0_NUM_PERIPH_SHIFT 12 | ||
166 | #define CR0_NUM_PERIPH_MASK 0x1f | ||
167 | #define CR0_NUM_EVENTS_SHIFT 17 | ||
168 | #define CR0_NUM_EVENTS_MASK 0x1f | ||
169 | |||
170 | #define CR1_ICACHE_LEN_SHIFT 0 | ||
171 | #define CR1_ICACHE_LEN_MASK 0x7 | ||
172 | #define CR1_NUM_ICACHELINES_SHIFT 4 | ||
173 | #define CR1_NUM_ICACHELINES_MASK 0xf | ||
174 | |||
175 | #define CRD_DATA_WIDTH_SHIFT 0 | ||
176 | #define CRD_DATA_WIDTH_MASK 0x7 | ||
177 | #define CRD_WR_CAP_SHIFT 4 | ||
178 | #define CRD_WR_CAP_MASK 0x7 | ||
179 | #define CRD_WR_Q_DEP_SHIFT 8 | ||
180 | #define CRD_WR_Q_DEP_MASK 0xf | ||
181 | #define CRD_RD_CAP_SHIFT 12 | ||
182 | #define CRD_RD_CAP_MASK 0x7 | ||
183 | #define CRD_RD_Q_DEP_SHIFT 16 | ||
184 | #define CRD_RD_Q_DEP_MASK 0xf | ||
185 | #define CRD_DATA_BUFF_SHIFT 20 | ||
186 | #define CRD_DATA_BUFF_MASK 0x3ff | ||
187 | |||
188 | #define PART 0x330 | ||
189 | #define DESIGNER 0x41 | ||
190 | #define REVISION 0x0 | ||
191 | #define INTEG_CFG 0x0 | ||
192 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) | ||
193 | |||
194 | #define PCELL_ID_VAL 0xb105f00d | ||
195 | |||
196 | #define PL330_STATE_STOPPED (1 << 0) | ||
197 | #define PL330_STATE_EXECUTING (1 << 1) | ||
198 | #define PL330_STATE_WFE (1 << 2) | ||
199 | #define PL330_STATE_FAULTING (1 << 3) | ||
200 | #define PL330_STATE_COMPLETING (1 << 4) | ||
201 | #define PL330_STATE_WFP (1 << 5) | ||
202 | #define PL330_STATE_KILLING (1 << 6) | ||
203 | #define PL330_STATE_FAULT_COMPLETING (1 << 7) | ||
204 | #define PL330_STATE_CACHEMISS (1 << 8) | ||
205 | #define PL330_STATE_UPDTPC (1 << 9) | ||
206 | #define PL330_STATE_ATBARRIER (1 << 10) | ||
207 | #define PL330_STATE_QUEUEBUSY (1 << 11) | ||
208 | #define PL330_STATE_INVALID (1 << 15) | ||
209 | |||
210 | #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \ | ||
211 | | PL330_STATE_WFE | PL330_STATE_FAULTING) | ||
212 | |||
213 | #define CMD_DMAADDH 0x54 | ||
214 | #define CMD_DMAEND 0x00 | ||
215 | #define CMD_DMAFLUSHP 0x35 | ||
216 | #define CMD_DMAGO 0xa0 | ||
217 | #define CMD_DMALD 0x04 | ||
218 | #define CMD_DMALDP 0x25 | ||
219 | #define CMD_DMALP 0x20 | ||
220 | #define CMD_DMALPEND 0x28 | ||
221 | #define CMD_DMAKILL 0x01 | ||
222 | #define CMD_DMAMOV 0xbc | ||
223 | #define CMD_DMANOP 0x18 | ||
224 | #define CMD_DMARMB 0x12 | ||
225 | #define CMD_DMASEV 0x34 | ||
226 | #define CMD_DMAST 0x08 | ||
227 | #define CMD_DMASTP 0x29 | ||
228 | #define CMD_DMASTZ 0x0c | ||
229 | #define CMD_DMAWFE 0x36 | ||
230 | #define CMD_DMAWFP 0x30 | ||
231 | #define CMD_DMAWMB 0x13 | ||
232 | |||
233 | #define SZ_DMAADDH 3 | ||
234 | #define SZ_DMAEND 1 | ||
235 | #define SZ_DMAFLUSHP 2 | ||
236 | #define SZ_DMALD 1 | ||
237 | #define SZ_DMALDP 2 | ||
238 | #define SZ_DMALP 2 | ||
239 | #define SZ_DMALPEND 2 | ||
240 | #define SZ_DMAKILL 1 | ||
241 | #define SZ_DMAMOV 6 | ||
242 | #define SZ_DMANOP 1 | ||
243 | #define SZ_DMARMB 1 | ||
244 | #define SZ_DMASEV 2 | ||
245 | #define SZ_DMAST 1 | ||
246 | #define SZ_DMASTP 2 | ||
247 | #define SZ_DMASTZ 1 | ||
248 | #define SZ_DMAWFE 2 | ||
249 | #define SZ_DMAWFP 2 | ||
250 | #define SZ_DMAWMB 1 | ||
251 | #define SZ_DMAGO 6 | ||
252 | |||
253 | #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) | ||
254 | #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) | ||
255 | |||
256 | #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) | ||
257 | #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) | ||
258 | |||
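To see how BRST_SIZE()/BRST_LEN() decode the CCR burst fields defined above, here is a small standalone sketch using made-up field values (size field 2 for 4-byte beats, length field 15 for 16 beats); it mirrors the macro arithmetic rather than any real register dump:

#include <stdio.h>

int main(void)
{
	unsigned ccr = (2u << 1) | (15u << 4);  /* hypothetical CCR bits */

	unsigned brst_size = 1u << ((ccr >> 1) & 0x7);  /* 1 << 2 = 4 bytes/beat */
	unsigned brst_len  = ((ccr >> 4) & 0xf) + 1;    /* 15 + 1 = 16 beats */

	/* BYTE_TO_BURST(4096, ccr): bursts needed to move 4096 bytes */
	printf("bursts = %u\n", 4096 / brst_size / brst_len);  /* 64 */
	return 0;
}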
259 | /* | ||
260 | * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req | ||
261 | * at 1byte/burst for P<->M and M<->M respectively. | ||
262 | * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req | ||
263 | * should be enough for P<->M and M<->M respectively. | ||
264 | */ | ||
265 | #define MCODE_BUFF_PER_REQ 256 | ||
266 | |||
267 | /* If the _pl330_req is available to the client */ | ||
268 | #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) | ||
269 | |||
270 | /* Use this _only_ to wait on transient states */ | ||
271 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); | ||
272 | |||
273 | #ifdef PL330_DEBUG_MCGEN | ||
274 | static unsigned cmd_line; | ||
275 | #define PL330_DBGCMD_DUMP(off, x...) do { \ | ||
276 | printk("%x:", cmd_line); \ | ||
277 | printk(x); \ | ||
278 | cmd_line += off; \ | ||
279 | } while (0) | ||
280 | #define PL330_DBGMC_START(addr) (cmd_line = addr) | ||
281 | #else | ||
282 | #define PL330_DBGCMD_DUMP(off, x...) do {} while (0) | ||
283 | #define PL330_DBGMC_START(addr) do {} while (0) | ||
284 | #endif | ||
285 | |||
286 | /* The number of default descriptors */ | ||
287 | 20 | ||
288 | #define NR_DEFAULT_DESC 16 | 21 | #define NR_DEFAULT_DESC 16 |
289 | 22 | ||
290 | /* Populated by the PL330 core driver for DMA API driver's info */ | ||
291 | struct pl330_config { | ||
292 | u32 periph_id; | ||
293 | u32 pcell_id; | ||
294 | #define DMAC_MODE_NS (1 << 0) | ||
295 | unsigned int mode; | ||
296 | unsigned int data_bus_width:10; /* In number of bits */ | ||
297 | unsigned int data_buf_dep:10; | ||
298 | unsigned int num_chan:4; | ||
299 | unsigned int num_peri:6; | ||
300 | u32 peri_ns; | ||
301 | unsigned int num_events:6; | ||
302 | u32 irq_ns; | ||
303 | }; | ||
304 | |||
305 | /* Handle to the DMAC provided to the PL330 core */ | ||
306 | struct pl330_info { | ||
307 | /* Owning device */ | ||
308 | struct device *dev; | ||
309 | /* Size of MicroCode buffers for each channel. */ | ||
310 | unsigned mcbufsz; | ||
311 | /* ioremap'ed address of PL330 registers. */ | ||
312 | void __iomem *base; | ||
313 | /* Client can freely use it. */ | ||
314 | void *client_data; | ||
315 | /* PL330 core data, Client must not touch it. */ | ||
316 | void *pl330_data; | ||
317 | /* Populated by the PL330 core driver during pl330_add */ | ||
318 | struct pl330_config pcfg; | ||
319 | /* | ||
320 | * If the DMAC has some reset mechanism, then the | ||
321 | * client may want to provide pointer to the method. | ||
322 | */ | ||
323 | void (*dmac_reset)(struct pl330_info *pi); | ||
324 | }; | ||
325 | |||
326 | /** | ||
327 | * Request Configuration. | ||
328 | * The PL330 core does not modify this and uses the last | ||
329 | * working configuration if the request doesn't provide any. | ||
330 | * | ||
331 | * The Client may want to provide this info only for the | ||
332 | * first request and a request with new settings. | ||
333 | */ | ||
334 | struct pl330_reqcfg { | ||
335 | /* Address Incrementing */ | ||
336 | unsigned dst_inc:1; | ||
337 | unsigned src_inc:1; | ||
338 | |||
339 | /* | ||
340 | * For now, the SRC & DST protection levels | ||
341 | * and burst size/length are assumed same. | ||
342 | */ | ||
343 | bool nonsecure; | ||
344 | bool privileged; | ||
345 | bool insnaccess; | ||
346 | unsigned brst_len:5; | ||
347 | unsigned brst_size:3; /* in power of 2 */ | ||
348 | |||
349 | enum pl330_dstcachectrl dcctl; | ||
350 | enum pl330_srccachectrl scctl; | ||
351 | enum pl330_byteswap swap; | ||
352 | struct pl330_config *pcfg; | ||
353 | }; | ||
354 | |||
355 | /* | ||
356 | * One cycle of DMAC operation. | ||
357 | * There may be more than one xfer in a request. | ||
358 | */ | ||
359 | struct pl330_xfer { | ||
360 | u32 src_addr; | ||
361 | u32 dst_addr; | ||
362 | /* Size to xfer */ | ||
363 | u32 bytes; | ||
364 | /* | ||
365 | * Pointer to next xfer in the list. | ||
366 | * The last xfer in the req must point to NULL. | ||
367 | */ | ||
368 | struct pl330_xfer *next; | ||
369 | }; | ||
370 | |||
371 | /* The xfer callbacks are made with one of these arguments. */ | ||
372 | enum pl330_op_err { | ||
373 | /* The all xfers in the request were success. */ | ||
374 | PL330_ERR_NONE, | ||
375 | /* If req aborted due to global error. */ | ||
376 | PL330_ERR_ABORT, | ||
377 | /* If req failed due to problem with Channel. */ | ||
378 | PL330_ERR_FAIL, | ||
379 | }; | ||
380 | |||
381 | /* A request defining Scatter-Gather List ending with NULL xfer. */ | ||
382 | struct pl330_req { | ||
383 | enum pl330_reqtype rqtype; | ||
384 | /* Index of peripheral for the xfer. */ | ||
385 | unsigned peri:5; | ||
386 | /* Unique token for this xfer, set by the client. */ | ||
387 | void *token; | ||
388 | /* Callback to be called after xfer. */ | ||
389 | void (*xfer_cb)(void *token, enum pl330_op_err err); | ||
390 | /* If NULL, req will be done at last set parameters. */ | ||
391 | struct pl330_reqcfg *cfg; | ||
392 | /* Pointer to first xfer in the request. */ | ||
393 | struct pl330_xfer *x; | ||
394 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
395 | struct list_head rqd; | ||
396 | }; | ||
397 | |||
398 | /* | ||
399 | * To know the status of the channel and DMAC, the client | ||
400 | * provides a pointer to this structure. The PL330 core | ||
401 | * fills it with current information. | ||
402 | */ | ||
403 | struct pl330_chanstatus { | ||
404 | /* | ||
405 | * If the DMAC engine halted due to some error, | ||
406 | * the client should remove-add DMAC. | ||
407 | */ | ||
408 | bool dmac_halted; | ||
409 | /* | ||
410 | * If channel is halted due to some error, | ||
411 | * the client should ABORT/FLUSH and START the channel. | ||
412 | */ | ||
413 | bool faulting; | ||
414 | /* Location of last load */ | ||
415 | u32 src_addr; | ||
416 | /* Location of last store */ | ||
417 | u32 dst_addr; | ||
418 | /* | ||
419 | * Pointer to the currently active req, NULL if channel is | ||
420 | * inactive, even though the requests may be present. | ||
421 | */ | ||
422 | struct pl330_req *top_req; | ||
423 | /* Pointer to req waiting second in the queue if any. */ | ||
424 | struct pl330_req *wait_req; | ||
425 | }; | ||
426 | |||
427 | enum pl330_chan_op { | ||
428 | /* Start the channel */ | ||
429 | PL330_OP_START, | ||
430 | /* Abort the active xfer */ | ||
431 | PL330_OP_ABORT, | ||
432 | /* Stop xfer and flush queue */ | ||
433 | PL330_OP_FLUSH, | ||
434 | }; | ||
435 | |||
436 | struct _xfer_spec { | ||
437 | u32 ccr; | ||
438 | struct pl330_req *r; | ||
439 | struct pl330_xfer *x; | ||
440 | }; | ||
441 | |||
442 | enum dmamov_dst { | ||
443 | SAR = 0, | ||
444 | CCR, | ||
445 | DAR, | ||
446 | }; | ||
447 | |||
448 | enum pl330_dst { | ||
449 | SRC = 0, | ||
450 | DST, | ||
451 | }; | ||
452 | |||
453 | enum pl330_cond { | ||
454 | SINGLE, | ||
455 | BURST, | ||
456 | ALWAYS, | ||
457 | }; | ||
458 | |||
459 | struct _pl330_req { | ||
460 | u32 mc_bus; | ||
461 | void *mc_cpu; | ||
462 | /* Number of bytes taken to setup MC for the req */ | ||
463 | u32 mc_len; | ||
464 | struct pl330_req *r; | ||
465 | }; | ||
466 | |||
467 | /* ToBeDone for tasklet */ | ||
468 | struct _pl330_tbd { | ||
469 | bool reset_dmac; | ||
470 | bool reset_mngr; | ||
471 | u8 reset_chan; | ||
472 | }; | ||
473 | |||
474 | /* A DMAC Thread */ | ||
475 | struct pl330_thread { | ||
476 | u8 id; | ||
477 | int ev; | ||
478 | /* If the channel is not yet acquired by any client */ | ||
479 | bool free; | ||
480 | /* Parent DMAC */ | ||
481 | struct pl330_dmac *dmac; | ||
482 | /* Only two at a time */ | ||
483 | struct _pl330_req req[2]; | ||
484 | /* Index of the last enqueued request */ | ||
485 | unsigned lstenq; | ||
486 | /* Index of the last submitted request or -1 if the DMA is stopped */ | ||
487 | int req_running; | ||
488 | }; | ||
489 | |||
490 | enum pl330_dmac_state { | ||
491 | UNINIT, | ||
492 | INIT, | ||
493 | DYING, | ||
494 | }; | ||
495 | |||
496 | /* A DMAC */ | ||
497 | struct pl330_dmac { | ||
498 | spinlock_t lock; | ||
499 | /* Holds list of reqs with due callbacks */ | ||
500 | struct list_head req_done; | ||
501 | /* Pointer to platform specific stuff */ | ||
502 | struct pl330_info *pinfo; | ||
503 | /* Maximum possible events/irqs */ | ||
504 | int events[32]; | ||
505 | /* BUS address of MicroCode buffer */ | ||
506 | u32 mcode_bus; | ||
507 | /* CPU address of MicroCode buffer */ | ||
508 | void *mcode_cpu; | ||
509 | /* List of all Channel threads */ | ||
510 | struct pl330_thread *channels; | ||
511 | /* Pointer to the MANAGER thread */ | ||
512 | struct pl330_thread *manager; | ||
513 | /* To handle bad news in interrupt */ | ||
514 | struct tasklet_struct tasks; | ||
515 | struct _pl330_tbd dmac_tbd; | ||
516 | /* State of DMAC operation */ | ||
517 | enum pl330_dmac_state state; | ||
518 | }; | ||
519 | |||
520 | enum desc_status { | 23 | enum desc_status { |
521 | /* In the DMAC pool */ | 24 | /* In the DMAC pool */ |
522 | FREE, | 25 | FREE, |
523 | /* | 26 | /* |
524 | * Allocated to some channel during prep_xxx | 27 | * Allocated to some channel during prep_xxx |
525 | * Also may be sitting on the work_list. | 28 | * Also may be sitting on the work_list. |
526 | */ | 29 | */ |
527 | PREP, | 30 | PREP, |
@@ -545,6 +48,9 @@ struct dma_pl330_chan { | |||
545 | /* DMA-Engine Channel */ | 48 | /* DMA-Engine Channel */ |
546 | struct dma_chan chan; | 49 | struct dma_chan chan; |
547 | 50 | ||
51 | /* Last completed cookie */ | ||
52 | dma_cookie_t completed; | ||
53 | |||
548 | /* List of to be xfered descriptors */ | 54 | /* List of to be xfered descriptors */ |
549 | struct list_head work_list; | 55 | struct list_head work_list; |
550 | 56 | ||
@@ -562,14 +68,6 @@ struct dma_pl330_chan { | |||
562 | * NULL if the channel is available to be acquired. | 68 | * NULL if the channel is available to be acquired. |
563 | */ | 69 | */ |
564 | void *pl330_chid; | 70 | void *pl330_chid; |
565 | |||
566 | /* For D-to-M and M-to-D channels */ | ||
567 | int burst_sz; /* the peripheral fifo width */ | ||
568 | int burst_len; /* the number of burst */ | ||
569 | dma_addr_t fifo_addr; | ||
570 | |||
571 | /* for cyclic capability */ | ||
572 | bool cyclic; | ||
573 | }; | 71 | }; |
574 | 72 | ||
575 | struct dma_pl330_dmac { | 73 | struct dma_pl330_dmac { |
@@ -606,1598 +104,6 @@ struct dma_pl330_desc { | |||
606 | struct dma_pl330_chan *pchan; | 104 | struct dma_pl330_chan *pchan; |
607 | }; | 105 | }; |
608 | 106 | ||
609 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | ||
610 | { | ||
611 | if (r && r->xfer_cb) | ||
612 | r->xfer_cb(r->token, err); | ||
613 | } | ||
614 | |||
615 | static inline bool _queue_empty(struct pl330_thread *thrd) | ||
616 | { | ||
617 | return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) | ||
618 | ? true : false; | ||
619 | } | ||
620 | |||
621 | static inline bool _queue_full(struct pl330_thread *thrd) | ||
622 | { | ||
623 | return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) | ||
624 | ? false : true; | ||
625 | } | ||
626 | |||
627 | static inline bool is_manager(struct pl330_thread *thrd) | ||
628 | { | ||
629 | struct pl330_dmac *pl330 = thrd->dmac; | ||
630 | |||
631 | /* MANAGER is indexed at the end */ | ||
632 | if (thrd->id == pl330->pinfo->pcfg.num_chan) | ||
633 | return true; | ||
634 | else | ||
635 | return false; | ||
636 | } | ||
637 | |||
638 | /* If manager of the thread is in Non-Secure mode */ | ||
639 | static inline bool _manager_ns(struct pl330_thread *thrd) | ||
640 | { | ||
641 | struct pl330_dmac *pl330 = thrd->dmac; | ||
642 | |||
643 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | ||
644 | } | ||
645 | |||
646 | static inline u32 get_id(struct pl330_info *pi, u32 off) | ||
647 | { | ||
648 | void __iomem *regs = pi->base; | ||
649 | u32 id = 0; | ||
650 | |||
651 | id |= (readb(regs + off + 0x0) << 0); | ||
652 | id |= (readb(regs + off + 0x4) << 8); | ||
653 | id |= (readb(regs + off + 0x8) << 16); | ||
654 | id |= (readb(regs + off + 0xc) << 24); | ||
655 | |||
656 | return id; | ||
657 | } | ||
658 | |||
659 | static inline u32 get_revision(u32 periph_id) | ||
660 | { | ||
661 | return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; | ||
662 | } | ||
663 | |||
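get_id() above assembles a 32-bit identification word from four byte-wide registers laid out at 4-byte strides, the usual PrimeCell ID arrangement, and get_revision() then extracts the revision field from it. Here is a small standalone sketch of the same assembly and extraction; the shift and mask values are illustrative, since the driver's PERIPH_REV_SHIFT/PERIPH_REV_MASK are defined outside this hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; the driver's real PERIPH_REV_SHIFT/MASK
     * live in a part of the file not shown in this hunk. */
    #define REV_SHIFT       20
    #define REV_MASK        0xf

    /* Mimic four byte-wide ID registers spaced 4 bytes apart. */
    static uint32_t assemble_id(const uint8_t regs[16])
    {
            uint32_t id = 0;

            id |= (uint32_t)regs[0x0] << 0;
            id |= (uint32_t)regs[0x4] << 8;
            id |= (uint32_t)regs[0x8] << 16;
            id |= (uint32_t)regs[0xc] << 24;

            return id;
    }

    int main(void)
    {
            /* Only offsets 0x0, 0x4, 0x8 and 0xc matter. */
            uint8_t fake[16] = { [0x0] = 0x30, [0x4] = 0x13, [0x8] = 0x24, [0xc] = 0x00 };
            uint32_t id = assemble_id(fake);

            printf("id=0x%08x rev=%u\n", (unsigned)id,
                   (unsigned)((id >> REV_SHIFT) & REV_MASK));
            return 0;
    }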
664 | static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], | ||
665 | enum pl330_dst da, u16 val) | ||
666 | { | ||
667 | if (dry_run) | ||
668 | return SZ_DMAADDH; | ||
669 | |||
670 | buf[0] = CMD_DMAADDH; | ||
671 | buf[0] |= (da << 1); | ||
672 | *((u16 *)&buf[1]) = val; | ||
673 | |||
674 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", | ||
675 | da == 1 ? "DA" : "SA", val); | ||
676 | |||
677 | return SZ_DMAADDH; | ||
678 | } | ||
679 | |||
680 | static inline u32 _emit_END(unsigned dry_run, u8 buf[]) | ||
681 | { | ||
682 | if (dry_run) | ||
683 | return SZ_DMAEND; | ||
684 | |||
685 | buf[0] = CMD_DMAEND; | ||
686 | |||
687 | PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n"); | ||
688 | |||
689 | return SZ_DMAEND; | ||
690 | } | ||
691 | |||
692 | static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri) | ||
693 | { | ||
694 | if (dry_run) | ||
695 | return SZ_DMAFLUSHP; | ||
696 | |||
697 | buf[0] = CMD_DMAFLUSHP; | ||
698 | |||
699 | peri &= 0x1f; | ||
700 | peri <<= 3; | ||
701 | buf[1] = peri; | ||
702 | |||
703 | PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3); | ||
704 | |||
705 | return SZ_DMAFLUSHP; | ||
706 | } | ||
707 | |||
708 | static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
709 | { | ||
710 | if (dry_run) | ||
711 | return SZ_DMALD; | ||
712 | |||
713 | buf[0] = CMD_DMALD; | ||
714 | |||
715 | if (cond == SINGLE) | ||
716 | buf[0] |= (0 << 1) | (1 << 0); | ||
717 | else if (cond == BURST) | ||
718 | buf[0] |= (1 << 1) | (1 << 0); | ||
719 | |||
720 | PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", | ||
721 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
722 | |||
723 | return SZ_DMALD; | ||
724 | } | ||
725 | |||
726 | static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], | ||
727 | enum pl330_cond cond, u8 peri) | ||
728 | { | ||
729 | if (dry_run) | ||
730 | return SZ_DMALDP; | ||
731 | |||
732 | buf[0] = CMD_DMALDP; | ||
733 | |||
734 | if (cond == BURST) | ||
735 | buf[0] |= (1 << 1); | ||
736 | |||
737 | peri &= 0x1f; | ||
738 | peri <<= 3; | ||
739 | buf[1] = peri; | ||
740 | |||
741 | PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", | ||
742 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
743 | |||
744 | return SZ_DMALDP; | ||
745 | } | ||
746 | |||
747 | static inline u32 _emit_LP(unsigned dry_run, u8 buf[], | ||
748 | unsigned loop, u8 cnt) | ||
749 | { | ||
750 | if (dry_run) | ||
751 | return SZ_DMALP; | ||
752 | |||
753 | buf[0] = CMD_DMALP; | ||
754 | |||
755 | if (loop) | ||
756 | buf[0] |= (1 << 1); | ||
757 | |||
758 | cnt--; /* DMAC increments by 1 internally */ | ||
759 | buf[1] = cnt; | ||
760 | |||
761 | PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt); | ||
762 | |||
763 | return SZ_DMALP; | ||
764 | } | ||
765 | |||
766 | struct _arg_LPEND { | ||
767 | enum pl330_cond cond; | ||
768 | bool forever; | ||
769 | unsigned loop; | ||
770 | u8 bjump; | ||
771 | }; | ||
772 | |||
773 | static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], | ||
774 | const struct _arg_LPEND *arg) | ||
775 | { | ||
776 | enum pl330_cond cond = arg->cond; | ||
777 | bool forever = arg->forever; | ||
778 | unsigned loop = arg->loop; | ||
779 | u8 bjump = arg->bjump; | ||
780 | |||
781 | if (dry_run) | ||
782 | return SZ_DMALPEND; | ||
783 | |||
784 | buf[0] = CMD_DMALPEND; | ||
785 | |||
786 | if (loop) | ||
787 | buf[0] |= (1 << 2); | ||
788 | |||
789 | if (!forever) | ||
790 | buf[0] |= (1 << 4); | ||
791 | |||
792 | if (cond == SINGLE) | ||
793 | buf[0] |= (0 << 1) | (1 << 0); | ||
794 | else if (cond == BURST) | ||
795 | buf[0] |= (1 << 1) | (1 << 0); | ||
796 | |||
797 | buf[1] = bjump; | ||
798 | |||
799 | PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", | ||
800 | forever ? "FE" : "END", | ||
801 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), | ||
802 | loop ? '1' : '0', | ||
803 | bjump); | ||
804 | |||
805 | return SZ_DMALPEND; | ||
806 | } | ||
807 | |||
808 | static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) | ||
809 | { | ||
810 | if (dry_run) | ||
811 | return SZ_DMAKILL; | ||
812 | |||
813 | buf[0] = CMD_DMAKILL; | ||
814 | |||
815 | return SZ_DMAKILL; | ||
816 | } | ||
817 | |||
818 | static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | ||
819 | enum dmamov_dst dst, u32 val) | ||
820 | { | ||
821 | if (dry_run) | ||
822 | return SZ_DMAMOV; | ||
823 | |||
824 | buf[0] = CMD_DMAMOV; | ||
825 | buf[1] = dst; | ||
826 | *((u32 *)&buf[2]) = val; | ||
827 | |||
828 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | ||
829 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | ||
830 | |||
831 | return SZ_DMAMOV; | ||
832 | } | ||
833 | |||
834 | static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) | ||
835 | { | ||
836 | if (dry_run) | ||
837 | return SZ_DMANOP; | ||
838 | |||
839 | buf[0] = CMD_DMANOP; | ||
840 | |||
841 | PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); | ||
842 | |||
843 | return SZ_DMANOP; | ||
844 | } | ||
845 | |||
846 | static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) | ||
847 | { | ||
848 | if (dry_run) | ||
849 | return SZ_DMARMB; | ||
850 | |||
851 | buf[0] = CMD_DMARMB; | ||
852 | |||
853 | PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); | ||
854 | |||
855 | return SZ_DMARMB; | ||
856 | } | ||
857 | |||
858 | static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) | ||
859 | { | ||
860 | if (dry_run) | ||
861 | return SZ_DMASEV; | ||
862 | |||
863 | buf[0] = CMD_DMASEV; | ||
864 | |||
865 | ev &= 0x1f; | ||
866 | ev <<= 3; | ||
867 | buf[1] = ev; | ||
868 | |||
869 | PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); | ||
870 | |||
871 | return SZ_DMASEV; | ||
872 | } | ||
873 | |||
874 | static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
875 | { | ||
876 | if (dry_run) | ||
877 | return SZ_DMAST; | ||
878 | |||
879 | buf[0] = CMD_DMAST; | ||
880 | |||
881 | if (cond == SINGLE) | ||
882 | buf[0] |= (0 << 1) | (1 << 0); | ||
883 | else if (cond == BURST) | ||
884 | buf[0] |= (1 << 1) | (1 << 0); | ||
885 | |||
886 | PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", | ||
887 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
888 | |||
889 | return SZ_DMAST; | ||
890 | } | ||
891 | |||
892 | static inline u32 _emit_STP(unsigned dry_run, u8 buf[], | ||
893 | enum pl330_cond cond, u8 peri) | ||
894 | { | ||
895 | if (dry_run) | ||
896 | return SZ_DMASTP; | ||
897 | |||
898 | buf[0] = CMD_DMASTP; | ||
899 | |||
900 | if (cond == BURST) | ||
901 | buf[0] |= (1 << 1); | ||
902 | |||
903 | peri &= 0x1f; | ||
904 | peri <<= 3; | ||
905 | buf[1] = peri; | ||
906 | |||
907 | PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", | ||
908 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
909 | |||
910 | return SZ_DMASTP; | ||
911 | } | ||
912 | |||
913 | static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) | ||
914 | { | ||
915 | if (dry_run) | ||
916 | return SZ_DMASTZ; | ||
917 | |||
918 | buf[0] = CMD_DMASTZ; | ||
919 | |||
920 | PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); | ||
921 | |||
922 | return SZ_DMASTZ; | ||
923 | } | ||
924 | |||
925 | static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, | ||
926 | unsigned invalidate) | ||
927 | { | ||
928 | if (dry_run) | ||
929 | return SZ_DMAWFE; | ||
930 | |||
931 | buf[0] = CMD_DMAWFE; | ||
932 | |||
933 | ev &= 0x1f; | ||
934 | ev <<= 3; | ||
935 | buf[1] = ev; | ||
936 | |||
937 | if (invalidate) | ||
938 | buf[1] |= (1 << 1); | ||
939 | |||
940 | PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", | ||
941 | ev >> 3, invalidate ? ", I" : ""); | ||
942 | |||
943 | return SZ_DMAWFE; | ||
944 | } | ||
945 | |||
946 | static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], | ||
947 | enum pl330_cond cond, u8 peri) | ||
948 | { | ||
949 | if (dry_run) | ||
950 | return SZ_DMAWFP; | ||
951 | |||
952 | buf[0] = CMD_DMAWFP; | ||
953 | |||
954 | if (cond == SINGLE) | ||
955 | buf[0] |= (0 << 1) | (0 << 0); | ||
956 | else if (cond == BURST) | ||
957 | buf[0] |= (1 << 1) | (0 << 0); | ||
958 | else | ||
959 | buf[0] |= (0 << 1) | (1 << 0); | ||
960 | |||
961 | peri &= 0x1f; | ||
962 | peri <<= 3; | ||
963 | buf[1] = peri; | ||
964 | |||
965 | PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", | ||
966 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3); | ||
967 | |||
968 | return SZ_DMAWFP; | ||
969 | } | ||
970 | |||
971 | static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) | ||
972 | { | ||
973 | if (dry_run) | ||
974 | return SZ_DMAWMB; | ||
975 | |||
976 | buf[0] = CMD_DMAWMB; | ||
977 | |||
978 | PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); | ||
979 | |||
980 | return SZ_DMAWMB; | ||
981 | } | ||
982 | |||
983 | struct _arg_GO { | ||
984 | u8 chan; | ||
985 | u32 addr; | ||
986 | unsigned ns; | ||
987 | }; | ||
988 | |||
989 | static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | ||
990 | const struct _arg_GO *arg) | ||
991 | { | ||
992 | u8 chan = arg->chan; | ||
993 | u32 addr = arg->addr; | ||
994 | unsigned ns = arg->ns; | ||
995 | |||
996 | if (dry_run) | ||
997 | return SZ_DMAGO; | ||
998 | |||
999 | buf[0] = CMD_DMAGO; | ||
1000 | buf[0] |= (ns << 1); | ||
1001 | |||
1002 | buf[1] = chan & 0x7; | ||
1003 | |||
1004 | *((u32 *)&buf[2]) = addr; | ||
1005 | |||
1006 | return SZ_DMAGO; | ||
1007 | } | ||
1008 | |||
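Every _emit_*() helper above follows the same two-pass convention: with dry_run set it only returns the encoded size of the instruction, and with dry_run clear it also writes the opcode bytes into buf[], so a caller can measure a microcode program first and then emit it at identical offsets. A standalone sketch of that pattern with two made-up instructions follows; the opcodes and sizes here are not PL330 encodings.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Two toy "instructions" following the driver's emit convention:
     * return the encoded size; only touch buf[] when dry_run is 0. */
    static unsigned emit_nop(unsigned dry_run, unsigned char buf[])
    {
            if (!dry_run)
                    buf[0] = 0x18;          /* made-up opcode */
            return 1;
    }

    static unsigned emit_mov(unsigned dry_run, unsigned char buf[], uint32_t val)
    {
            if (!dry_run) {
                    buf[0] = 0xbc;          /* made-up opcode */
                    memcpy(&buf[1], &val, sizeof(val));
            }
            return 1 + sizeof(val);
    }

    static unsigned build(unsigned dry_run, unsigned char buf[])
    {
            unsigned off = 0;

            off += emit_mov(dry_run, &buf[off], 0x2000);
            off += emit_nop(dry_run, &buf[off]);
            return off;
    }

    int main(void)
    {
            unsigned char prog[16];
            unsigned need = build(1, prog); /* pass 1: size only, prog untouched */

            if (need <= sizeof(prog))
                    build(0, prog);         /* pass 2: actually encode */

            printf("program is %u bytes\n", need);
            return 0;
    }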
1009 | #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) | ||
1010 | |||
1011 | /* Returns Time-Out */ | ||
1012 | static bool _until_dmac_idle(struct pl330_thread *thrd) | ||
1013 | { | ||
1014 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1015 | unsigned long loops = msecs_to_loops(5); | ||
1016 | |||
1017 | do { | ||
1018 | /* Until Manager is Idle */ | ||
1019 | if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) | ||
1020 | break; | ||
1021 | |||
1022 | cpu_relax(); | ||
1023 | } while (--loops); | ||
1024 | |||
1025 | if (!loops) | ||
1026 | return true; | ||
1027 | |||
1028 | return false; | ||
1029 | } | ||
1030 | |||
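_until_dmac_idle() is a bounded busy-wait: spin on a busy bit, give up after roughly 5 ms worth of iterations derived from loops_per_jiffy, and report a timeout by returning true. Here is a sketch of the same shape against a plain status word, with an arbitrary iteration budget in place of msecs_to_loops().

    #include <stdbool.h>
    #include <stdio.h>

    #define BUSY    (1u << 0)

    /* Poll until the BUSY bit clears or the budget runs out.
     * Returns true on timeout, mirroring the driver's convention. */
    static bool wait_idle(const volatile unsigned *status, unsigned long loops)
    {
            do {
                    if (!(*status & BUSY))
                            return false;   /* idle */
                    /* cpu_relax() would go here in kernel code */
            } while (--loops);

            return true;                    /* timed out */
    }

    int main(void)
    {
            volatile unsigned status = 0;   /* already idle */

            printf("timed out: %d\n", wait_idle(&status, 1000));
            return 0;
    }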
1031 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, | ||
1032 | u8 insn[], bool as_manager) | ||
1033 | { | ||
1034 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1035 | u32 val; | ||
1036 | |||
1037 | val = (insn[0] << 16) | (insn[1] << 24); | ||
1038 | if (!as_manager) { | ||
1039 | val |= (1 << 0); | ||
1040 | val |= (thrd->id << 8); /* Channel Number */ | ||
1041 | } | ||
1042 | writel(val, regs + DBGINST0); | ||
1043 | |||
1044 | val = *((u32 *)&insn[2]); | ||
1045 | writel(val, regs + DBGINST1); | ||
1046 | |||
1047 | /* If timed out due to halted state-machine */ | ||
1048 | if (_until_dmac_idle(thrd)) { | ||
1049 | dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); | ||
1050 | return; | ||
1051 | } | ||
1052 | |||
1053 | /* Get going */ | ||
1054 | writel(0, regs + DBGCMD); | ||
1055 | } | ||
1056 | |||
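_execute_DBGINSN() above splits a 6-byte debug instruction across the two debug registers: bytes 0 and 1 land in the upper half of DBGINST0 together with the channel-select bits, bytes 2 to 5 form DBGINST1, and a zero write to DBGCMD starts execution. Below is a standalone sketch of just the word-packing arithmetic; the opcode byte used in main() is illustrative, and the MMIO writes are omitted.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct dbginst {
            uint32_t inst0;
            uint32_t inst1;
    };

    /* Pack a 6-byte instruction the way the driver does:
     * insn[0..1] -> bits 16..31 of DBGINST0, insn[2..5] -> DBGINST1.
     * When not issued as the manager, set bit 0 and the channel id from bit 8. */
    static struct dbginst pack_dbginsn(const uint8_t insn[6], int chan_id, int as_manager)
    {
            struct dbginst d;

            d.inst0 = ((uint32_t)insn[0] << 16) | ((uint32_t)insn[1] << 24);
            if (!as_manager) {
                    d.inst0 |= 1u << 0;
                    d.inst0 |= (uint32_t)chan_id << 8;
            }
            memcpy(&d.inst1, &insn[2], sizeof(d.inst1));

            return d;
    }

    int main(void)
    {
            uint8_t insn[6] = { 0x01, 0, 0, 0, 0, 0 }; /* illustrative opcode byte */
            struct dbginst d = pack_dbginsn(insn, 3, 0);

            printf("DBGINST0=0x%08x DBGINST1=0x%08x\n",
                   (unsigned)d.inst0, (unsigned)d.inst1);
            return 0;
    }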
1057 | /* | ||
1058 | * Mark a _pl330_req as free. | ||
1059 | * We do it by writing DMAEND as the first instruction | ||
1060 | * because no valid request is going to have DMAEND as | ||
1061 | * its first instruction to execute. | ||
1062 | */ | ||
1063 | static void mark_free(struct pl330_thread *thrd, int idx) | ||
1064 | { | ||
1065 | struct _pl330_req *req = &thrd->req[idx]; | ||
1066 | |||
1067 | _emit_END(0, req->mc_cpu); | ||
1068 | req->mc_len = 0; | ||
1069 | |||
1070 | thrd->req_running = -1; | ||
1071 | } | ||
1072 | |||
1073 | static inline u32 _state(struct pl330_thread *thrd) | ||
1074 | { | ||
1075 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1076 | u32 val; | ||
1077 | |||
1078 | if (is_manager(thrd)) | ||
1079 | val = readl(regs + DS) & 0xf; | ||
1080 | else | ||
1081 | val = readl(regs + CS(thrd->id)) & 0xf; | ||
1082 | |||
1083 | switch (val) { | ||
1084 | case DS_ST_STOP: | ||
1085 | return PL330_STATE_STOPPED; | ||
1086 | case DS_ST_EXEC: | ||
1087 | return PL330_STATE_EXECUTING; | ||
1088 | case DS_ST_CMISS: | ||
1089 | return PL330_STATE_CACHEMISS; | ||
1090 | case DS_ST_UPDTPC: | ||
1091 | return PL330_STATE_UPDTPC; | ||
1092 | case DS_ST_WFE: | ||
1093 | return PL330_STATE_WFE; | ||
1094 | case DS_ST_FAULT: | ||
1095 | return PL330_STATE_FAULTING; | ||
1096 | case DS_ST_ATBRR: | ||
1097 | if (is_manager(thrd)) | ||
1098 | return PL330_STATE_INVALID; | ||
1099 | else | ||
1100 | return PL330_STATE_ATBARRIER; | ||
1101 | case DS_ST_QBUSY: | ||
1102 | if (is_manager(thrd)) | ||
1103 | return PL330_STATE_INVALID; | ||
1104 | else | ||
1105 | return PL330_STATE_QUEUEBUSY; | ||
1106 | case DS_ST_WFP: | ||
1107 | if (is_manager(thrd)) | ||
1108 | return PL330_STATE_INVALID; | ||
1109 | else | ||
1110 | return PL330_STATE_WFP; | ||
1111 | case DS_ST_KILL: | ||
1112 | if (is_manager(thrd)) | ||
1113 | return PL330_STATE_INVALID; | ||
1114 | else | ||
1115 | return PL330_STATE_KILLING; | ||
1116 | case DS_ST_CMPLT: | ||
1117 | if (is_manager(thrd)) | ||
1118 | return PL330_STATE_INVALID; | ||
1119 | else | ||
1120 | return PL330_STATE_COMPLETING; | ||
1121 | case DS_ST_FLTCMP: | ||
1122 | if (is_manager(thrd)) | ||
1123 | return PL330_STATE_INVALID; | ||
1124 | else | ||
1125 | return PL330_STATE_FAULT_COMPLETING; | ||
1126 | default: | ||
1127 | return PL330_STATE_INVALID; | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | static void _stop(struct pl330_thread *thrd) | ||
1132 | { | ||
1133 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1134 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
1135 | |||
1136 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) | ||
1137 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
1138 | |||
1139 | /* Return if nothing needs to be done */ | ||
1140 | if (_state(thrd) == PL330_STATE_COMPLETING | ||
1141 | || _state(thrd) == PL330_STATE_KILLING | ||
1142 | || _state(thrd) == PL330_STATE_STOPPED) | ||
1143 | return; | ||
1144 | |||
1145 | _emit_KILL(0, insn); | ||
1146 | |||
1147 | /* Stop generating interrupts for SEV */ | ||
1148 | writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); | ||
1149 | |||
1150 | _execute_DBGINSN(thrd, insn, is_manager(thrd)); | ||
1151 | } | ||
1152 | |||
1153 | /* Start doing req 'idx' of thread 'thrd' */ | ||
1154 | static bool _trigger(struct pl330_thread *thrd) | ||
1155 | { | ||
1156 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1157 | struct _pl330_req *req; | ||
1158 | struct pl330_req *r; | ||
1159 | struct _arg_GO go; | ||
1160 | unsigned ns; | ||
1161 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
1162 | int idx; | ||
1163 | |||
1164 | /* Return if already ACTIVE */ | ||
1165 | if (_state(thrd) != PL330_STATE_STOPPED) | ||
1166 | return true; | ||
1167 | |||
1168 | idx = 1 - thrd->lstenq; | ||
1169 | if (!IS_FREE(&thrd->req[idx])) | ||
1170 | req = &thrd->req[idx]; | ||
1171 | else { | ||
1172 | idx = thrd->lstenq; | ||
1173 | if (!IS_FREE(&thrd->req[idx])) | ||
1174 | req = &thrd->req[idx]; | ||
1175 | else | ||
1176 | req = NULL; | ||
1177 | } | ||
1178 | |||
1179 | /* Return if no request */ | ||
1180 | if (!req || !req->r) | ||
1181 | return true; | ||
1182 | |||
1183 | r = req->r; | ||
1184 | |||
1185 | if (r->cfg) | ||
1186 | ns = r->cfg->nonsecure ? 1 : 0; | ||
1187 | else if (readl(regs + CS(thrd->id)) & CS_CNS) | ||
1188 | ns = 1; | ||
1189 | else | ||
1190 | ns = 0; | ||
1191 | |||
1192 | /* See 'Abort Sources' point-4 at Page 2-25 */ | ||
1193 | if (_manager_ns(thrd) && !ns) | ||
1194 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", | ||
1195 | __func__, __LINE__); | ||
1196 | |||
1197 | go.chan = thrd->id; | ||
1198 | go.addr = req->mc_bus; | ||
1199 | go.ns = ns; | ||
1200 | _emit_GO(0, insn, &go); | ||
1201 | |||
1202 | /* Set to generate interrupts for SEV */ | ||
1203 | writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN); | ||
1204 | |||
1205 | /* Only manager can execute GO */ | ||
1206 | _execute_DBGINSN(thrd, insn, true); | ||
1207 | |||
1208 | thrd->req_running = idx; | ||
1209 | |||
1210 | return true; | ||
1211 | } | ||
1212 | |||
1213 | static bool _start(struct pl330_thread *thrd) | ||
1214 | { | ||
1215 | switch (_state(thrd)) { | ||
1216 | case PL330_STATE_FAULT_COMPLETING: | ||
1217 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
1218 | |||
1219 | if (_state(thrd) == PL330_STATE_KILLING) | ||
1220 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
1221 | |||
1222 | case PL330_STATE_FAULTING: | ||
1223 | _stop(thrd); | ||
1224 | |||
1225 | case PL330_STATE_KILLING: | ||
1226 | case PL330_STATE_COMPLETING: | ||
1227 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
1228 | |||
1229 | case PL330_STATE_STOPPED: | ||
1230 | return _trigger(thrd); | ||
1231 | |||
1232 | case PL330_STATE_WFP: | ||
1233 | case PL330_STATE_QUEUEBUSY: | ||
1234 | case PL330_STATE_ATBARRIER: | ||
1235 | case PL330_STATE_UPDTPC: | ||
1236 | case PL330_STATE_CACHEMISS: | ||
1237 | case PL330_STATE_EXECUTING: | ||
1238 | return true; | ||
1239 | |||
1240 | case PL330_STATE_WFE: /* For RESUME, nothing yet */ | ||
1241 | default: | ||
1242 | return false; | ||
1243 | } | ||
1244 | } | ||
1245 | |||
1246 | static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | ||
1247 | const struct _xfer_spec *pxs, int cyc) | ||
1248 | { | ||
1249 | int off = 0; | ||
1250 | struct pl330_config *pcfg = pxs->r->cfg->pcfg; | ||
1251 | |||
1252 | /* check lock-up free version */ | ||
1253 | if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { | ||
1254 | while (cyc--) { | ||
1255 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1256 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1257 | } | ||
1258 | } else { | ||
1259 | while (cyc--) { | ||
1260 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1261 | off += _emit_RMB(dry_run, &buf[off]); | ||
1262 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1263 | off += _emit_WMB(dry_run, &buf[off]); | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | return off; | ||
1268 | } | ||
1269 | |||
1270 | static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | ||
1271 | const struct _xfer_spec *pxs, int cyc) | ||
1272 | { | ||
1273 | int off = 0; | ||
1274 | |||
1275 | while (cyc--) { | ||
1276 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1277 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1278 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1279 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1280 | } | ||
1281 | |||
1282 | return off; | ||
1283 | } | ||
1284 | |||
1285 | static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | ||
1286 | const struct _xfer_spec *pxs, int cyc) | ||
1287 | { | ||
1288 | int off = 0; | ||
1289 | |||
1290 | while (cyc--) { | ||
1291 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1292 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1293 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1294 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1295 | } | ||
1296 | |||
1297 | return off; | ||
1298 | } | ||
1299 | |||
1300 | static int _bursts(unsigned dry_run, u8 buf[], | ||
1301 | const struct _xfer_spec *pxs, int cyc) | ||
1302 | { | ||
1303 | int off = 0; | ||
1304 | |||
1305 | switch (pxs->r->rqtype) { | ||
1306 | case MEMTODEV: | ||
1307 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | ||
1308 | break; | ||
1309 | case DEVTOMEM: | ||
1310 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | ||
1311 | break; | ||
1312 | case MEMTOMEM: | ||
1313 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | ||
1314 | break; | ||
1315 | default: | ||
1316 | off += 0x40000000; /* Scare off the Client */ | ||
1317 | break; | ||
1318 | } | ||
1319 | |||
1320 | return off; | ||
1321 | } | ||
1322 | |||
1323 | /* Returns bytes consumed and updates bursts */ | ||
1324 | static inline int _loop(unsigned dry_run, u8 buf[], | ||
1325 | unsigned long *bursts, const struct _xfer_spec *pxs) | ||
1326 | { | ||
1327 | int cyc, cycmax, szlp, szlpend, szbrst, off; | ||
1328 | unsigned lcnt0, lcnt1, ljmp0, ljmp1; | ||
1329 | struct _arg_LPEND lpend; | ||
1330 | |||
1331 | /* Max iterations possible in DMALP is 256 */ | ||
1332 | if (*bursts >= 256*256) { | ||
1333 | lcnt1 = 256; | ||
1334 | lcnt0 = 256; | ||
1335 | cyc = *bursts / lcnt1 / lcnt0; | ||
1336 | } else if (*bursts > 256) { | ||
1337 | lcnt1 = 256; | ||
1338 | lcnt0 = *bursts / lcnt1; | ||
1339 | cyc = 1; | ||
1340 | } else { | ||
1341 | lcnt1 = *bursts; | ||
1342 | lcnt0 = 0; | ||
1343 | cyc = 1; | ||
1344 | } | ||
1345 | |||
1346 | szlp = _emit_LP(1, buf, 0, 0); | ||
1347 | szbrst = _bursts(1, buf, pxs, 1); | ||
1348 | |||
1349 | lpend.cond = ALWAYS; | ||
1350 | lpend.forever = false; | ||
1351 | lpend.loop = 0; | ||
1352 | lpend.bjump = 0; | ||
1353 | szlpend = _emit_LPEND(1, buf, &lpend); | ||
1354 | |||
1355 | if (lcnt0) { | ||
1356 | szlp *= 2; | ||
1357 | szlpend *= 2; | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1361 | * Max bursts that we can unroll due to limit on the | ||
1362 | * size of backward jump that can be encoded in DMALPEND | ||
1363 | * which is 8-bits and hence 255 | ||
1364 | */ | ||
1365 | cycmax = (255 - (szlp + szlpend)) / szbrst; | ||
1366 | |||
1367 | cyc = (cycmax < cyc) ? cycmax : cyc; | ||
1368 | |||
1369 | off = 0; | ||
1370 | |||
1371 | if (lcnt0) { | ||
1372 | off += _emit_LP(dry_run, &buf[off], 0, lcnt0); | ||
1373 | ljmp0 = off; | ||
1374 | } | ||
1375 | |||
1376 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); | ||
1377 | ljmp1 = off; | ||
1378 | |||
1379 | off += _bursts(dry_run, &buf[off], pxs, cyc); | ||
1380 | |||
1381 | lpend.cond = ALWAYS; | ||
1382 | lpend.forever = false; | ||
1383 | lpend.loop = 1; | ||
1384 | lpend.bjump = off - ljmp1; | ||
1385 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1386 | |||
1387 | if (lcnt0) { | ||
1388 | lpend.cond = ALWAYS; | ||
1389 | lpend.forever = false; | ||
1390 | lpend.loop = 0; | ||
1391 | lpend.bjump = off - ljmp0; | ||
1392 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1393 | } | ||
1394 | |||
1395 | *bursts = lcnt1 * cyc; | ||
1396 | if (lcnt0) | ||
1397 | *bursts *= lcnt0; | ||
1398 | |||
1399 | return off; | ||
1400 | } | ||
1401 | |||
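_loop() above has to express an arbitrary burst count with two nested DMALP counters of at most 256 iterations each plus an unrolled body of cyc bursts, and the unroll is later capped by the 8-bit backward jump that DMALPEND can encode. The starting decomposition is plain integer arithmetic; here is a sketch of that step alone, ignoring the jump-size cap (any remainder is covered by the caller, _setup_loops() below, which keeps calling _loop() until all bursts are consumed).

    #include <stdio.h>

    /* Split 'bursts' into outer * inner * unrolled, with each DMALP
     * counter limited to 256, the way _loop() picks its starting point.
     * The real function then shrinks 'cyc' to respect DMALPEND's 8-bit
     * backward jump; that cap is ignored here. */
    static void split_bursts(unsigned long bursts,
                             unsigned *lcnt0, unsigned *lcnt1, unsigned *cyc)
    {
            if (bursts >= 256 * 256) {
                    *lcnt1 = 256;
                    *lcnt0 = 256;
                    *cyc = bursts / 256 / 256;
            } else if (bursts > 256) {
                    *lcnt1 = 256;
                    *lcnt0 = bursts / 256;
                    *cyc = 1;
            } else {
                    *lcnt1 = bursts;
                    *lcnt0 = 0;
                    *cyc = 1;
            }
    }

    int main(void)
    {
            unsigned long bursts[] = { 100, 1000, 100000 };

            for (int i = 0; i < 3; i++) {
                    unsigned l0, l1, c;

                    split_bursts(bursts[i], &l0, &l1, &c);
                    printf("%lu -> lcnt0=%u lcnt1=%u cyc=%u (covers %lu)\n",
                           bursts[i], l0, l1, c,
                           (unsigned long)(l0 ? l0 : 1) * l1 * c);
            }
            return 0;
    }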
1402 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | ||
1403 | const struct _xfer_spec *pxs) | ||
1404 | { | ||
1405 | struct pl330_xfer *x = pxs->x; | ||
1406 | u32 ccr = pxs->ccr; | ||
1407 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | ||
1408 | int off = 0; | ||
1409 | |||
1410 | while (bursts) { | ||
1411 | c = bursts; | ||
1412 | off += _loop(dry_run, &buf[off], &c, pxs); | ||
1413 | bursts -= c; | ||
1414 | } | ||
1415 | |||
1416 | return off; | ||
1417 | } | ||
1418 | |||
1419 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | ||
1420 | const struct _xfer_spec *pxs) | ||
1421 | { | ||
1422 | struct pl330_xfer *x = pxs->x; | ||
1423 | int off = 0; | ||
1424 | |||
1425 | /* DMAMOV SAR, x->src_addr */ | ||
1426 | off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr); | ||
1427 | /* DMAMOV DAR, x->dst_addr */ | ||
1428 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | ||
1429 | |||
1430 | /* Setup Loop(s) */ | ||
1431 | off += _setup_loops(dry_run, &buf[off], pxs); | ||
1432 | |||
1433 | return off; | ||
1434 | } | ||
1435 | |||
1436 | /* | ||
1437 | * A req is a sequence of one or more xfer units. | ||
1438 | * Returns the number of bytes taken to setup the MC for the req. | ||
1439 | */ | ||
1440 | static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | ||
1441 | unsigned index, struct _xfer_spec *pxs) | ||
1442 | { | ||
1443 | struct _pl330_req *req = &thrd->req[index]; | ||
1444 | struct pl330_xfer *x; | ||
1445 | u8 *buf = req->mc_cpu; | ||
1446 | int off = 0; | ||
1447 | |||
1448 | PL330_DBGMC_START(req->mc_bus); | ||
1449 | |||
1450 | /* DMAMOV CCR, ccr */ | ||
1451 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); | ||
1452 | |||
1453 | x = pxs->r->x; | ||
1454 | do { | ||
1455 | /* Error if xfer length is not aligned at burst size */ | ||
1456 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) | ||
1457 | return -EINVAL; | ||
1458 | |||
1459 | pxs->x = x; | ||
1460 | off += _setup_xfer(dry_run, &buf[off], pxs); | ||
1461 | |||
1462 | x = x->next; | ||
1463 | } while (x); | ||
1464 | |||
1465 | /* DMASEV peripheral/event */ | ||
1466 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | ||
1467 | /* DMAEND */ | ||
1468 | off += _emit_END(dry_run, &buf[off]); | ||
1469 | |||
1470 | return off; | ||
1471 | } | ||
1472 | |||
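_setup_req() above rejects any xfer whose byte count is not a whole number of bursts, i.e. not divisible by burst size times burst length as decoded from the CCR, and _setup_loops() converts bytes to bursts before looping. Here is a sketch of that arithmetic under the usual interpretation that a burst moves (1 << brst_size) bytes per beat and brst_len beats per burst; the real BRST_SIZE/BRST_LEN/BYTE_TO_BURST macros are defined outside this hunk, so treat the semantics here as an assumption.

    #include <stdio.h>

    /* Assumed semantics (the real macros live earlier in the file):
     * a burst moves (1 << brst_size) bytes per beat, brst_len beats per burst. */
    static unsigned burst_bytes(unsigned brst_size, unsigned brst_len)
    {
            return (1u << brst_size) * brst_len;
    }

    int main(void)
    {
            unsigned brst_size = 2;         /* 4-byte beats */
            unsigned brst_len = 8;          /* 8 beats per burst */
            unsigned bytes = 4096;
            unsigned bb = burst_bytes(brst_size, brst_len);

            if (bytes % bb) {
                    printf("%u bytes not burst-aligned (burst = %u bytes)\n", bytes, bb);
                    return 1;
            }
            printf("%u bytes = %u bursts of %u bytes\n", bytes, bytes / bb, bb);
            return 0;
    }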
1473 | static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | ||
1474 | { | ||
1475 | u32 ccr = 0; | ||
1476 | |||
1477 | if (rqc->src_inc) | ||
1478 | ccr |= CC_SRCINC; | ||
1479 | |||
1480 | if (rqc->dst_inc) | ||
1481 | ccr |= CC_DSTINC; | ||
1482 | |||
1483 | /* We set same protection levels for Src and DST for now */ | ||
1484 | if (rqc->privileged) | ||
1485 | ccr |= CC_SRCPRI | CC_DSTPRI; | ||
1486 | if (rqc->nonsecure) | ||
1487 | ccr |= CC_SRCNS | CC_DSTNS; | ||
1488 | if (rqc->insnaccess) | ||
1489 | ccr |= CC_SRCIA | CC_DSTIA; | ||
1490 | |||
1491 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); | ||
1492 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); | ||
1493 | |||
1494 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | ||
1495 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | ||
1496 | |||
1497 | ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); | ||
1498 | ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); | ||
1499 | |||
1500 | ccr |= (rqc->swap << CC_SWAP_SHFT); | ||
1501 | |||
1502 | return ccr; | ||
1503 | } | ||
1504 | |||
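_prepare_ccr() above packs the request configuration into the channel control word: increment and protection flags, burst length stored as length minus one in a 4-bit field, burst size stored as a log2 code, plus cache-control and swap fields. A sketch of the same packing pattern follows; the bit positions below are placeholders, since the driver's CC_* shift constants are defined outside this hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit positions only; the point is the packing pattern,
     * not the exact register layout. */
    #define SRCINC            (1u << 0)
    #define DSTINC            (1u << 14)
    #define SRCBRSTSIZE_SHFT  1
    #define SRCBRSTLEN_SHFT   4
    #define DSTBRSTSIZE_SHFT  15
    #define DSTBRSTLEN_SHFT   18

    static uint32_t pack_ccr(int src_inc, int dst_inc,
                             unsigned brst_len, unsigned brst_size)
    {
            uint32_t ccr = 0;

            if (src_inc)
                    ccr |= SRCINC;
            if (dst_inc)
                    ccr |= DSTINC;

            /* Hardware counts burst length from 0, hence the -1, capped to 4 bits. */
            ccr |= ((brst_len - 1) & 0xf) << SRCBRSTLEN_SHFT;
            ccr |= ((brst_len - 1) & 0xf) << DSTBRSTLEN_SHFT;

            /* Burst size is encoded as log2(bytes per beat). */
            ccr |= brst_size << SRCBRSTSIZE_SHFT;
            ccr |= brst_size << DSTBRSTSIZE_SHFT;

            return ccr;
    }

    int main(void)
    {
            printf("ccr=0x%08x\n", (unsigned)pack_ccr(1, 1, 16, 2));
            return 0;
    }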
1505 | static inline bool _is_valid(u32 ccr) | ||
1506 | { | ||
1507 | enum pl330_dstcachectrl dcctl; | ||
1508 | enum pl330_srccachectrl scctl; | ||
1509 | |||
1510 | dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; | ||
1511 | scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; | ||
1512 | |||
1513 | if (dcctl == DINVALID1 || dcctl == DINVALID2 | ||
1514 | || scctl == SINVALID1 || scctl == SINVALID2) | ||
1515 | return false; | ||
1516 | else | ||
1517 | return true; | ||
1518 | } | ||
1519 | |||
1520 | /* | ||
1521 | * Submit a list of xfers after which the client wants notification. | ||
1522 | * Client is not notified after each xfer unit, just once after all | ||
1523 | * xfer units are done or some error occurs. | ||
1524 | */ | ||
1525 | static int pl330_submit_req(void *ch_id, struct pl330_req *r) | ||
1526 | { | ||
1527 | struct pl330_thread *thrd = ch_id; | ||
1528 | struct pl330_dmac *pl330; | ||
1529 | struct pl330_info *pi; | ||
1530 | struct _xfer_spec xs; | ||
1531 | unsigned long flags; | ||
1532 | void __iomem *regs; | ||
1533 | unsigned idx; | ||
1534 | u32 ccr; | ||
1535 | int ret = 0; | ||
1536 | |||
1537 | /* No Req or Unacquired Channel or DMAC */ | ||
1538 | if (!r || !thrd || thrd->free) | ||
1539 | return -EINVAL; | ||
1540 | |||
1541 | pl330 = thrd->dmac; | ||
1542 | pi = pl330->pinfo; | ||
1543 | regs = pi->base; | ||
1544 | |||
1545 | if (pl330->state == DYING | ||
1546 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | ||
1547 | dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", | ||
1548 | __func__, __LINE__); | ||
1549 | return -EAGAIN; | ||
1550 | } | ||
1551 | |||
1552 | /* If request for non-existing peripheral */ | ||
1553 | if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { | ||
1554 | dev_info(thrd->dmac->pinfo->dev, | ||
1555 | "%s:%d Invalid peripheral(%u)!\n", | ||
1556 | __func__, __LINE__, r->peri); | ||
1557 | return -EINVAL; | ||
1558 | } | ||
1559 | |||
1560 | spin_lock_irqsave(&pl330->lock, flags); | ||
1561 | |||
1562 | if (_queue_full(thrd)) { | ||
1563 | ret = -EAGAIN; | ||
1564 | goto xfer_exit; | ||
1565 | } | ||
1566 | |||
1567 | |||
1568 | /* Use last settings, if not provided */ | ||
1569 | if (r->cfg) { | ||
1570 | /* Prefer Secure Channel */ | ||
1571 | if (!_manager_ns(thrd)) | ||
1572 | r->cfg->nonsecure = 0; | ||
1573 | else | ||
1574 | r->cfg->nonsecure = 1; | ||
1575 | |||
1576 | ccr = _prepare_ccr(r->cfg); | ||
1577 | } else { | ||
1578 | ccr = readl(regs + CC(thrd->id)); | ||
1579 | } | ||
1580 | |||
1581 | /* If this req doesn't have valid xfer settings */ | ||
1582 | if (!_is_valid(ccr)) { | ||
1583 | ret = -EINVAL; | ||
1584 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", | ||
1585 | __func__, __LINE__, ccr); | ||
1586 | goto xfer_exit; | ||
1587 | } | ||
1588 | |||
1589 | idx = IS_FREE(&thrd->req[0]) ? 0 : 1; | ||
1590 | |||
1591 | xs.ccr = ccr; | ||
1592 | xs.r = r; | ||
1593 | |||
1594 | /* First dry run to check if req is acceptable */ | ||
1595 | ret = _setup_req(1, thrd, idx, &xs); | ||
1596 | if (ret < 0) | ||
1597 | goto xfer_exit; | ||
1598 | |||
1599 | if (ret > pi->mcbufsz / 2) { | ||
1600 | dev_info(thrd->dmac->pinfo->dev, | ||
1601 | "%s:%d Try increasing mcbufsz\n", | ||
1602 | __func__, __LINE__); | ||
1603 | ret = -ENOMEM; | ||
1604 | goto xfer_exit; | ||
1605 | } | ||
1606 | |||
1607 | /* Hook the request */ | ||
1608 | thrd->lstenq = idx; | ||
1609 | thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); | ||
1610 | thrd->req[idx].r = r; | ||
1611 | |||
1612 | ret = 0; | ||
1613 | |||
1614 | xfer_exit: | ||
1615 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1616 | |||
1617 | return ret; | ||
1618 | } | ||
1619 | |||
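A channel thread holds at most two requests (req[0] and req[1]); pl330_submit_req() above picks whichever slot is free, sizes the microcode with a dry run so it fits in half the per-channel buffer, then programs the slot and records it in lstenq, while _trigger() earlier in this diff prefers the slot opposite lstenq so the older request runs first. Below is a minimal sketch of that two-slot ping-pong bookkeeping, with a simple non-zero payload standing in for the IS_FREE() microcode test.

    #include <stdbool.h>
    #include <stdio.h>

    struct slot {
            int payload;            /* 0 means free; stands in for IS_FREE() */
    };

    struct thread {
            struct slot req[2];
            int lstenq;             /* last enqueued slot */
            int req_running;        /* slot being executed, or -1 */
    };

    static bool enqueue(struct thread *t, int payload)
    {
            int idx;

            if (t->req[0].payload && t->req[1].payload)
                    return false;                   /* queue full */

            idx = t->req[0].payload ? 1 : 0;
            t->req[idx].payload = payload;
            t->lstenq = idx;
            return true;
    }

    /* Prefer the slot opposite the last enqueued one, like _trigger(). */
    static int pick_next(const struct thread *t)
    {
            int idx = 1 - t->lstenq;

            if (t->req[idx].payload)
                    return idx;
            if (t->req[t->lstenq].payload)
                    return t->lstenq;
            return -1;
    }

    int main(void)
    {
            struct thread t = { .lstenq = 1, .req_running = -1 };

            enqueue(&t, 10);
            enqueue(&t, 20);
            printf("next slot to run: %d\n", pick_next(&t));
            return 0;
    }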
1620 | static void pl330_dotask(unsigned long data) | ||
1621 | { | ||
1622 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; | ||
1623 | struct pl330_info *pi = pl330->pinfo; | ||
1624 | unsigned long flags; | ||
1625 | int i; | ||
1626 | |||
1627 | spin_lock_irqsave(&pl330->lock, flags); | ||
1628 | |||
1629 | /* The DMAC itself gone nuts */ | ||
1630 | if (pl330->dmac_tbd.reset_dmac) { | ||
1631 | pl330->state = DYING; | ||
1632 | /* Reset the manager too */ | ||
1633 | pl330->dmac_tbd.reset_mngr = true; | ||
1634 | /* Clear the reset flag */ | ||
1635 | pl330->dmac_tbd.reset_dmac = false; | ||
1636 | } | ||
1637 | |||
1638 | if (pl330->dmac_tbd.reset_mngr) { | ||
1639 | _stop(pl330->manager); | ||
1640 | /* Reset all channels */ | ||
1641 | pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; | ||
1642 | /* Clear the reset flag */ | ||
1643 | pl330->dmac_tbd.reset_mngr = false; | ||
1644 | } | ||
1645 | |||
1646 | for (i = 0; i < pi->pcfg.num_chan; i++) { | ||
1647 | |||
1648 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { | ||
1649 | struct pl330_thread *thrd = &pl330->channels[i]; | ||
1650 | void __iomem *regs = pi->base; | ||
1651 | enum pl330_op_err err; | ||
1652 | |||
1653 | _stop(thrd); | ||
1654 | |||
1655 | if (readl(regs + FSC) & (1 << thrd->id)) | ||
1656 | err = PL330_ERR_FAIL; | ||
1657 | else | ||
1658 | err = PL330_ERR_ABORT; | ||
1659 | |||
1660 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1661 | |||
1662 | _callback(thrd->req[1 - thrd->lstenq].r, err); | ||
1663 | _callback(thrd->req[thrd->lstenq].r, err); | ||
1664 | |||
1665 | spin_lock_irqsave(&pl330->lock, flags); | ||
1666 | |||
1667 | thrd->req[0].r = NULL; | ||
1668 | thrd->req[1].r = NULL; | ||
1669 | mark_free(thrd, 0); | ||
1670 | mark_free(thrd, 1); | ||
1671 | |||
1672 | /* Clear the reset flag */ | ||
1673 | pl330->dmac_tbd.reset_chan &= ~(1 << i); | ||
1674 | } | ||
1675 | } | ||
1676 | |||
1677 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1678 | |||
1679 | return; | ||
1680 | } | ||
1681 | |||
1682 | /* Returns 1 if state was updated, 0 otherwise */ | ||
1683 | static int pl330_update(const struct pl330_info *pi) | ||
1684 | { | ||
1685 | struct pl330_req *rqdone, *tmp; | ||
1686 | struct pl330_dmac *pl330; | ||
1687 | unsigned long flags; | ||
1688 | void __iomem *regs; | ||
1689 | u32 val; | ||
1690 | int id, ev, ret = 0; | ||
1691 | |||
1692 | if (!pi || !pi->pl330_data) | ||
1693 | return 0; | ||
1694 | |||
1695 | regs = pi->base; | ||
1696 | pl330 = pi->pl330_data; | ||
1697 | |||
1698 | spin_lock_irqsave(&pl330->lock, flags); | ||
1699 | |||
1700 | val = readl(regs + FSM) & 0x1; | ||
1701 | if (val) | ||
1702 | pl330->dmac_tbd.reset_mngr = true; | ||
1703 | else | ||
1704 | pl330->dmac_tbd.reset_mngr = false; | ||
1705 | |||
1706 | val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); | ||
1707 | pl330->dmac_tbd.reset_chan |= val; | ||
1708 | if (val) { | ||
1709 | int i = 0; | ||
1710 | while (i < pi->pcfg.num_chan) { | ||
1711 | if (val & (1 << i)) { | ||
1712 | dev_info(pi->dev, | ||
1713 | "Reset Channel-%d\t CS-%x FTC-%x\n", | ||
1714 | i, readl(regs + CS(i)), | ||
1715 | readl(regs + FTC(i))); | ||
1716 | _stop(&pl330->channels[i]); | ||
1717 | } | ||
1718 | i++; | ||
1719 | } | ||
1720 | } | ||
1721 | |||
1722 | /* Check which event happened i.e., thread notified */ | ||
1723 | val = readl(regs + ES); | ||
1724 | if (pi->pcfg.num_events < 32 | ||
1725 | && val & ~((1 << pi->pcfg.num_events) - 1)) { | ||
1726 | pl330->dmac_tbd.reset_dmac = true; | ||
1727 | dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); | ||
1728 | ret = 1; | ||
1729 | goto updt_exit; | ||
1730 | } | ||
1731 | |||
1732 | for (ev = 0; ev < pi->pcfg.num_events; ev++) { | ||
1733 | if (val & (1 << ev)) { /* Event occurred */ | ||
1734 | struct pl330_thread *thrd; | ||
1735 | u32 inten = readl(regs + INTEN); | ||
1736 | int active; | ||
1737 | |||
1738 | /* Clear the event */ | ||
1739 | if (inten & (1 << ev)) | ||
1740 | writel(1 << ev, regs + INTCLR); | ||
1741 | |||
1742 | ret = 1; | ||
1743 | |||
1744 | id = pl330->events[ev]; | ||
1745 | |||
1746 | thrd = &pl330->channels[id]; | ||
1747 | |||
1748 | active = thrd->req_running; | ||
1749 | if (active == -1) /* Aborted */ | ||
1750 | continue; | ||
1751 | |||
1752 | /* Detach the req */ | ||
1753 | rqdone = thrd->req[active].r; | ||
1754 | thrd->req[active].r = NULL; | ||
1755 | |||
1756 | mark_free(thrd, active); | ||
1757 | |||
1758 | /* Get going again ASAP */ | ||
1759 | _start(thrd); | ||
1760 | |||
1761 | /* For now, just make a list of callbacks to be done */ | ||
1762 | list_add_tail(&rqdone->rqd, &pl330->req_done); | ||
1763 | } | ||
1764 | } | ||
1765 | |||
1766 | /* Now that we are in no hurry, do the callbacks */ | ||
1767 | list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { | ||
1768 | list_del(&rqdone->rqd); | ||
1769 | |||
1770 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1771 | _callback(rqdone, PL330_ERR_NONE); | ||
1772 | spin_lock_irqsave(&pl330->lock, flags); | ||
1773 | } | ||
1774 | |||
1775 | updt_exit: | ||
1776 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1777 | |||
1778 | if (pl330->dmac_tbd.reset_dmac | ||
1779 | || pl330->dmac_tbd.reset_mngr | ||
1780 | || pl330->dmac_tbd.reset_chan) { | ||
1781 | ret = 1; | ||
1782 | tasklet_schedule(&pl330->tasks); | ||
1783 | } | ||
1784 | |||
1785 | return ret; | ||
1786 | } | ||
1787 | |||
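The interrupt-side half above, pl330_update(), reads the event-status register and, for every set bit, clears the interrupt, detaches the finished request from its channel thread, restarts the thread, and defers the callback to a list drained with the lock dropped. The per-event walk is ordinary mask iteration; a sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Walk the set bits of a pending-events mask the way pl330_update()
     * does, handing each event number to a handler. */
    static void for_each_event(uint32_t pending, unsigned num_events,
                               void (*handle)(unsigned ev))
    {
            for (unsigned ev = 0; ev < num_events; ev++)
                    if (pending & (1u << ev))
                            handle(ev);
    }

    static void report(unsigned ev)
    {
            printf("event %u fired\n", ev);
    }

    int main(void)
    {
            for_each_event(0x00000025, 32, report); /* events 0, 2 and 5 */
            return 0;
    }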
1788 | static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) | ||
1789 | { | ||
1790 | struct pl330_thread *thrd = ch_id; | ||
1791 | struct pl330_dmac *pl330; | ||
1792 | unsigned long flags; | ||
1793 | int ret = 0, active; | ||
1794 | |||
1795 | if (!thrd || thrd->free || thrd->dmac->state == DYING) | ||
1796 | return -EINVAL; | ||
1797 | |||
1798 | pl330 = thrd->dmac; | ||
1799 | active = thrd->req_running; | ||
1800 | |||
1801 | spin_lock_irqsave(&pl330->lock, flags); | ||
1802 | |||
1803 | switch (op) { | ||
1804 | case PL330_OP_FLUSH: | ||
1805 | /* Make sure the channel is stopped */ | ||
1806 | _stop(thrd); | ||
1807 | |||
1808 | thrd->req[0].r = NULL; | ||
1809 | thrd->req[1].r = NULL; | ||
1810 | mark_free(thrd, 0); | ||
1811 | mark_free(thrd, 1); | ||
1812 | break; | ||
1813 | |||
1814 | case PL330_OP_ABORT: | ||
1815 | /* Make sure the channel is stopped */ | ||
1816 | _stop(thrd); | ||
1817 | |||
1818 | /* ABORT is only for the active req */ | ||
1819 | if (active == -1) | ||
1820 | break; | ||
1821 | |||
1822 | thrd->req[active].r = NULL; | ||
1823 | mark_free(thrd, active); | ||
1824 | |||
1825 | /* Start the next */ | ||
1826 | case PL330_OP_START: | ||
1827 | if ((active == -1) && !_start(thrd)) | ||
1828 | ret = -EIO; | ||
1829 | break; | ||
1830 | |||
1831 | default: | ||
1832 | ret = -EINVAL; | ||
1833 | } | ||
1834 | |||
1835 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1836 | return ret; | ||
1837 | } | ||
1838 | |||
1839 | /* Reserve an event */ | ||
1840 | static inline int _alloc_event(struct pl330_thread *thrd) | ||
1841 | { | ||
1842 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1843 | struct pl330_info *pi = pl330->pinfo; | ||
1844 | int ev; | ||
1845 | |||
1846 | for (ev = 0; ev < pi->pcfg.num_events; ev++) | ||
1847 | if (pl330->events[ev] == -1) { | ||
1848 | pl330->events[ev] = thrd->id; | ||
1849 | return ev; | ||
1850 | } | ||
1851 | |||
1852 | return -1; | ||
1853 | } | ||
1854 | |||
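Events are handed out from a flat table in which events[ev] holds the owning channel id or -1 when free; _alloc_event() above takes the first free slot, and _free_event() further down releases a slot only if the caller actually owns it. A minimal standalone sketch of that slot table:

    #include <stdio.h>

    #define NUM_EVENTS 8

    /* events[ev] holds the owning channel id, or -1 when the event is free. */
    static int events[NUM_EVENTS];

    static int alloc_event(int chan_id)
    {
            for (int ev = 0; ev < NUM_EVENTS; ev++)
                    if (events[ev] == -1) {
                            events[ev] = chan_id;
                            return ev;
                    }
            return -1;
    }

    /* Release only if this channel really owns the event, as _free_event() does. */
    static void free_event(int chan_id, int ev)
    {
            if (ev >= 0 && ev < NUM_EVENTS && events[ev] == chan_id)
                    events[ev] = -1;
    }

    int main(void)
    {
            for (int ev = 0; ev < NUM_EVENTS; ev++)
                    events[ev] = -1;        /* mark all events free */

            int ev = alloc_event(2);
            printf("channel 2 got event %d\n", ev);
            free_event(2, ev);
            return 0;
    }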
1855 | static bool _chan_ns(const struct pl330_info *pi, int i) | ||
1856 | { | ||
1857 | return pi->pcfg.irq_ns & (1 << i); | ||
1858 | } | ||
1859 | |||
1860 | /* Upon success, returns IdentityToken for the | ||
1861 | * allocated channel, NULL otherwise. | ||
1862 | */ | ||
1863 | static void *pl330_request_channel(const struct pl330_info *pi) | ||
1864 | { | ||
1865 | struct pl330_thread *thrd = NULL; | ||
1866 | struct pl330_dmac *pl330; | ||
1867 | unsigned long flags; | ||
1868 | int chans, i; | ||
1869 | |||
1870 | if (!pi || !pi->pl330_data) | ||
1871 | return NULL; | ||
1872 | |||
1873 | pl330 = pi->pl330_data; | ||
1874 | |||
1875 | if (pl330->state == DYING) | ||
1876 | return NULL; | ||
1877 | |||
1878 | chans = pi->pcfg.num_chan; | ||
1879 | |||
1880 | spin_lock_irqsave(&pl330->lock, flags); | ||
1881 | |||
1882 | for (i = 0; i < chans; i++) { | ||
1883 | thrd = &pl330->channels[i]; | ||
1884 | if ((thrd->free) && (!_manager_ns(thrd) || | ||
1885 | _chan_ns(pi, i))) { | ||
1886 | thrd->ev = _alloc_event(thrd); | ||
1887 | if (thrd->ev >= 0) { | ||
1888 | thrd->free = false; | ||
1889 | thrd->lstenq = 1; | ||
1890 | thrd->req[0].r = NULL; | ||
1891 | mark_free(thrd, 0); | ||
1892 | thrd->req[1].r = NULL; | ||
1893 | mark_free(thrd, 1); | ||
1894 | break; | ||
1895 | } | ||
1896 | } | ||
1897 | thrd = NULL; | ||
1898 | } | ||
1899 | |||
1900 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1901 | |||
1902 | return thrd; | ||
1903 | } | ||
1904 | |||
1905 | /* Release an event */ | ||
1906 | static inline void _free_event(struct pl330_thread *thrd, int ev) | ||
1907 | { | ||
1908 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1909 | struct pl330_info *pi = pl330->pinfo; | ||
1910 | |||
1911 | /* If the event is valid and was held by the thread */ | ||
1912 | if (ev >= 0 && ev < pi->pcfg.num_events | ||
1913 | && pl330->events[ev] == thrd->id) | ||
1914 | pl330->events[ev] = -1; | ||
1915 | } | ||
1916 | |||
1917 | static void pl330_release_channel(void *ch_id) | ||
1918 | { | ||
1919 | struct pl330_thread *thrd = ch_id; | ||
1920 | struct pl330_dmac *pl330; | ||
1921 | unsigned long flags; | ||
1922 | |||
1923 | if (!thrd || thrd->free) | ||
1924 | return; | ||
1925 | |||
1926 | _stop(thrd); | ||
1927 | |||
1928 | _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); | ||
1929 | _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); | ||
1930 | |||
1931 | pl330 = thrd->dmac; | ||
1932 | |||
1933 | spin_lock_irqsave(&pl330->lock, flags); | ||
1934 | _free_event(thrd, thrd->ev); | ||
1935 | thrd->free = true; | ||
1936 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1937 | } | ||
1938 | |||
1939 | /* Initialize the structure for PL330 configuration, that can be used | ||
1940 | * by the client driver to make the best use of the DMAC | ||
1941 | */ | ||
1942 | static void read_dmac_config(struct pl330_info *pi) | ||
1943 | { | ||
1944 | void __iomem *regs = pi->base; | ||
1945 | u32 val; | ||
1946 | |||
1947 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; | ||
1948 | val &= CRD_DATA_WIDTH_MASK; | ||
1949 | pi->pcfg.data_bus_width = 8 * (1 << val); | ||
1950 | |||
1951 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; | ||
1952 | val &= CRD_DATA_BUFF_MASK; | ||
1953 | pi->pcfg.data_buf_dep = val + 1; | ||
1954 | |||
1955 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; | ||
1956 | val &= CR0_NUM_CHANS_MASK; | ||
1957 | val += 1; | ||
1958 | pi->pcfg.num_chan = val; | ||
1959 | |||
1960 | val = readl(regs + CR0); | ||
1961 | if (val & CR0_PERIPH_REQ_SET) { | ||
1962 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; | ||
1963 | val += 1; | ||
1964 | pi->pcfg.num_peri = val; | ||
1965 | pi->pcfg.peri_ns = readl(regs + CR4); | ||
1966 | } else { | ||
1967 | pi->pcfg.num_peri = 0; | ||
1968 | } | ||
1969 | |||
1970 | val = readl(regs + CR0); | ||
1971 | if (val & CR0_BOOT_MAN_NS) | ||
1972 | pi->pcfg.mode |= DMAC_MODE_NS; | ||
1973 | else | ||
1974 | pi->pcfg.mode &= ~DMAC_MODE_NS; | ||
1975 | |||
1976 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; | ||
1977 | val &= CR0_NUM_EVENTS_MASK; | ||
1978 | val += 1; | ||
1979 | pi->pcfg.num_events = val; | ||
1980 | |||
1981 | pi->pcfg.irq_ns = readl(regs + CR3); | ||
1982 | |||
1983 | pi->pcfg.periph_id = get_id(pi, PERIPH_ID); | ||
1984 | pi->pcfg.pcell_id = get_id(pi, PCELL_ID); | ||
1985 | } | ||
1986 | |||
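read_dmac_config() above derives the DMAC topology from the configuration registers: count fields are stored as the count minus one, and the data-bus width is 8 times 2 raised to the raw field value. Here is a sketch of that decoding for two of the fields; the shifts and masks are placeholders, since the CR0_*/CRD_* constants are defined outside this hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder field positions; the decoding rules are the point:
     * width = 8 * 2^field, counts are stored as (count - 1). */
    #define NUM_CHANS_SHIFT  4
    #define NUM_CHANS_MASK   0x7
    #define DATA_WIDTH_SHIFT 0
    #define DATA_WIDTH_MASK  0x7

    struct cfg {
            unsigned data_bus_width;        /* in bits */
            unsigned num_chan;
    };

    static struct cfg decode(uint32_t cr0, uint32_t crd)
    {
            struct cfg c;

            c.data_bus_width = 8 * (1u << ((crd >> DATA_WIDTH_SHIFT) & DATA_WIDTH_MASK));
            c.num_chan = ((cr0 >> NUM_CHANS_SHIFT) & NUM_CHANS_MASK) + 1;

            return c;
    }

    int main(void)
    {
            /* crd field 3 -> 64-bit bus; cr0 field 7 -> 8 channels */
            struct cfg c = decode(7u << NUM_CHANS_SHIFT, 3u << DATA_WIDTH_SHIFT);

            printf("bus %u bits, %u channels\n", c.data_bus_width, c.num_chan);
            return 0;
    }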
1987 | static inline void _reset_thread(struct pl330_thread *thrd) | ||
1988 | { | ||
1989 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1990 | struct pl330_info *pi = pl330->pinfo; | ||
1991 | |||
1992 | thrd->req[0].mc_cpu = pl330->mcode_cpu | ||
1993 | + (thrd->id * pi->mcbufsz); | ||
1994 | thrd->req[0].mc_bus = pl330->mcode_bus | ||
1995 | + (thrd->id * pi->mcbufsz); | ||
1996 | thrd->req[0].r = NULL; | ||
1997 | mark_free(thrd, 0); | ||
1998 | |||
1999 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | ||
2000 | + pi->mcbufsz / 2; | ||
2001 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | ||
2002 | + pi->mcbufsz / 2; | ||
2003 | thrd->req[1].r = NULL; | ||
2004 | mark_free(thrd, 1); | ||
2005 | } | ||
2006 | |||
2007 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | ||
2008 | { | ||
2009 | struct pl330_info *pi = pl330->pinfo; | ||
2010 | int chans = pi->pcfg.num_chan; | ||
2011 | struct pl330_thread *thrd; | ||
2012 | int i; | ||
2013 | |||
2014 | /* Allocate 1 Manager and 'chans' Channel threads */ | ||
2015 | pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), | ||
2016 | GFP_KERNEL); | ||
2017 | if (!pl330->channels) | ||
2018 | return -ENOMEM; | ||
2019 | |||
2020 | /* Init Channel threads */ | ||
2021 | for (i = 0; i < chans; i++) { | ||
2022 | thrd = &pl330->channels[i]; | ||
2023 | thrd->id = i; | ||
2024 | thrd->dmac = pl330; | ||
2025 | _reset_thread(thrd); | ||
2026 | thrd->free = true; | ||
2027 | } | ||
2028 | |||
2029 | /* MANAGER is indexed at the end */ | ||
2030 | thrd = &pl330->channels[chans]; | ||
2031 | thrd->id = chans; | ||
2032 | thrd->dmac = pl330; | ||
2033 | thrd->free = false; | ||
2034 | pl330->manager = thrd; | ||
2035 | |||
2036 | return 0; | ||
2037 | } | ||
2038 | |||
2039 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | ||
2040 | { | ||
2041 | struct pl330_info *pi = pl330->pinfo; | ||
2042 | int chans = pi->pcfg.num_chan; | ||
2043 | int ret; | ||
2044 | |||
2045 | /* | ||
2046 | * Alloc MicroCode buffer for 'chans' Channel threads. | ||
2047 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | ||
2048 | */ | ||
2049 | pl330->mcode_cpu = dma_alloc_coherent(pi->dev, | ||
2050 | chans * pi->mcbufsz, | ||
2051 | &pl330->mcode_bus, GFP_KERNEL); | ||
2052 | if (!pl330->mcode_cpu) { | ||
2053 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2054 | __func__, __LINE__); | ||
2055 | return -ENOMEM; | ||
2056 | } | ||
2057 | |||
2058 | ret = dmac_alloc_threads(pl330); | ||
2059 | if (ret) { | ||
2060 | dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n", | ||
2061 | __func__, __LINE__); | ||
2062 | dma_free_coherent(pi->dev, | ||
2063 | chans * pi->mcbufsz, | ||
2064 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2065 | return ret; | ||
2066 | } | ||
2067 | |||
2068 | return 0; | ||
2069 | } | ||
2070 | |||
2071 | static int pl330_add(struct pl330_info *pi) | ||
2072 | { | ||
2073 | struct pl330_dmac *pl330; | ||
2074 | void __iomem *regs; | ||
2075 | int i, ret; | ||
2076 | |||
2077 | if (!pi || !pi->dev) | ||
2078 | return -EINVAL; | ||
2079 | |||
2080 | /* If already added */ | ||
2081 | if (pi->pl330_data) | ||
2082 | return -EINVAL; | ||
2083 | |||
2084 | /* | ||
2085 | * If the SoC can perform reset on the DMAC, then do it | ||
2086 | * before reading its configuration. | ||
2087 | */ | ||
2088 | if (pi->dmac_reset) | ||
2089 | pi->dmac_reset(pi); | ||
2090 | |||
2091 | regs = pi->base; | ||
2092 | |||
2093 | /* Check if we can handle this DMAC */ | ||
2094 | if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL | ||
2095 | || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { | ||
2096 | dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", | ||
2097 | get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); | ||
2098 | return -EINVAL; | ||
2099 | } | ||
2100 | |||
2101 | /* Read the configuration of the DMAC */ | ||
2102 | read_dmac_config(pi); | ||
2103 | |||
2104 | if (pi->pcfg.num_events == 0) { | ||
2105 | dev_err(pi->dev, "%s:%d Can't work without events!\n", | ||
2106 | __func__, __LINE__); | ||
2107 | return -EINVAL; | ||
2108 | } | ||
2109 | |||
2110 | pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); | ||
2111 | if (!pl330) { | ||
2112 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2113 | __func__, __LINE__); | ||
2114 | return -ENOMEM; | ||
2115 | } | ||
2116 | |||
2117 | /* Assign the info structure and private data */ | ||
2118 | pl330->pinfo = pi; | ||
2119 | pi->pl330_data = pl330; | ||
2120 | |||
2121 | spin_lock_init(&pl330->lock); | ||
2122 | |||
2123 | INIT_LIST_HEAD(&pl330->req_done); | ||
2124 | |||
2125 | /* Use default MC buffer size if not provided */ | ||
2126 | if (!pi->mcbufsz) | ||
2127 | pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; | ||
2128 | |||
2129 | /* Mark all events as free */ | ||
2130 | for (i = 0; i < pi->pcfg.num_events; i++) | ||
2131 | pl330->events[i] = -1; | ||
2132 | |||
2133 | /* Allocate resources needed by the DMAC */ | ||
2134 | ret = dmac_alloc_resources(pl330); | ||
2135 | if (ret) { | ||
2136 | dev_err(pi->dev, "Unable to create channels for DMAC\n"); | ||
2137 | kfree(pl330); | ||
2138 | return ret; | ||
2139 | } | ||
2140 | |||
2141 | tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330); | ||
2142 | |||
2143 | pl330->state = INIT; | ||
2144 | |||
2145 | return 0; | ||
2146 | } | ||
2147 | |||
2148 | static int dmac_free_threads(struct pl330_dmac *pl330) | ||
2149 | { | ||
2150 | struct pl330_info *pi = pl330->pinfo; | ||
2151 | int chans = pi->pcfg.num_chan; | ||
2152 | struct pl330_thread *thrd; | ||
2153 | int i; | ||
2154 | |||
2155 | /* Release Channel threads */ | ||
2156 | for (i = 0; i < chans; i++) { | ||
2157 | thrd = &pl330->channels[i]; | ||
2158 | pl330_release_channel((void *)thrd); | ||
2159 | } | ||
2160 | |||
2161 | /* Free memory */ | ||
2162 | kfree(pl330->channels); | ||
2163 | |||
2164 | return 0; | ||
2165 | } | ||
2166 | |||
2167 | static void dmac_free_resources(struct pl330_dmac *pl330) | ||
2168 | { | ||
2169 | struct pl330_info *pi = pl330->pinfo; | ||
2170 | int chans = pi->pcfg.num_chan; | ||
2171 | |||
2172 | dmac_free_threads(pl330); | ||
2173 | |||
2174 | dma_free_coherent(pi->dev, chans * pi->mcbufsz, | ||
2175 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2176 | } | ||
2177 | |||
2178 | static void pl330_del(struct pl330_info *pi) | ||
2179 | { | ||
2180 | struct pl330_dmac *pl330; | ||
2181 | |||
2182 | if (!pi || !pi->pl330_data) | ||
2183 | return; | ||
2184 | |||
2185 | pl330 = pi->pl330_data; | ||
2186 | |||
2187 | pl330->state = UNINIT; | ||
2188 | |||
2189 | tasklet_kill(&pl330->tasks); | ||
2190 | |||
2191 | /* Free DMAC resources */ | ||
2192 | dmac_free_resources(pl330); | ||
2193 | |||
2194 | kfree(pl330); | ||
2195 | pi->pl330_data = NULL; | ||
2196 | } | ||
2197 | |||
2198 | /* forward declaration */ | ||
2199 | static struct amba_driver pl330_driver; | ||
2200 | |||
2201 | static inline struct dma_pl330_chan * | 107 | static inline struct dma_pl330_chan * |
2202 | to_pchan(struct dma_chan *ch) | 108 | to_pchan(struct dma_chan *ch) |
2203 | { | 109 | { |
@@ -2217,9 +123,12 @@ static inline void free_desc_list(struct list_head *list) | |||
2217 | { | 123 | { |
2218 | struct dma_pl330_dmac *pdmac; | 124 | struct dma_pl330_dmac *pdmac; |
2219 | struct dma_pl330_desc *desc; | 125 | struct dma_pl330_desc *desc; |
2220 | struct dma_pl330_chan *pch = NULL; | 126 | struct dma_pl330_chan *pch; |
2221 | unsigned long flags; | 127 | unsigned long flags; |
2222 | 128 | ||
129 | if (list_empty(list)) | ||
130 | return; | ||
131 | |||
2223 | /* Finish off the work list */ | 132 | /* Finish off the work list */ |
2224 | list_for_each_entry(desc, list, node) { | 133 | list_for_each_entry(desc, list, node) { |
2225 | dma_async_tx_callback callback; | 134 | dma_async_tx_callback callback; |
@@ -2236,10 +145,6 @@ static inline void free_desc_list(struct list_head *list) | |||
2236 | desc->pchan = NULL; | 145 | desc->pchan = NULL; |
2237 | } | 146 | } |
2238 | 147 | ||
2239 | /* pch will be unset if list was empty */ | ||
2240 | if (!pch) | ||
2241 | return; | ||
2242 | |||
2243 | pdmac = pch->dmac; | 148 | pdmac = pch->dmac; |
2244 | 149 | ||
2245 | spin_lock_irqsave(&pdmac->pool_lock, flags); | 150 | spin_lock_irqsave(&pdmac->pool_lock, flags); |
@@ -2247,32 +152,6 @@ static inline void free_desc_list(struct list_head *list) | |||
2247 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | 152 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); |
2248 | } | 153 | } |
2249 | 154 | ||
2250 | static inline void handle_cyclic_desc_list(struct list_head *list) | ||
2251 | { | ||
2252 | struct dma_pl330_desc *desc; | ||
2253 | struct dma_pl330_chan *pch = NULL; | ||
2254 | unsigned long flags; | ||
2255 | |||
2256 | list_for_each_entry(desc, list, node) { | ||
2257 | dma_async_tx_callback callback; | ||
2258 | |||
2259 | /* Change status to reload it */ | ||
2260 | desc->status = PREP; | ||
2261 | pch = desc->pchan; | ||
2262 | callback = desc->txd.callback; | ||
2263 | if (callback) | ||
2264 | callback(desc->txd.callback_param); | ||
2265 | } | ||
2266 | |||
2267 | /* pch will be unset if list was empty */ | ||
2268 | if (!pch) | ||
2269 | return; | ||
2270 | |||
2271 | spin_lock_irqsave(&pch->lock, flags); | ||
2272 | list_splice_tail_init(list, &pch->work_list); | ||
2273 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2274 | } | ||
2275 | |||
2276 | static inline void fill_queue(struct dma_pl330_chan *pch) | 155 | static inline void fill_queue(struct dma_pl330_chan *pch) |
2277 | { | 156 | { |
2278 | struct dma_pl330_desc *desc; | 157 | struct dma_pl330_desc *desc; |
@@ -2314,8 +193,7 @@ static void pl330_tasklet(unsigned long data) | |||
2314 | /* Pick up ripe tomatoes */ | 193 | /* Pick up ripe tomatoes */ |
2315 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) | 194 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) |
2316 | if (desc->status == DONE) { | 195 | if (desc->status == DONE) { |
2317 | if (!pch->cyclic) | 196 | pch->completed = desc->txd.cookie; |
2318 | dma_cookie_complete(&desc->txd); | ||
2319 | list_move_tail(&desc->node, &list); | 197 | list_move_tail(&desc->node, &list); |
2320 | } | 198 | } |
2321 | 199 | ||
@@ -2327,10 +205,7 @@ static void pl330_tasklet(unsigned long data) | |||
2327 | 205 | ||
2328 | spin_unlock_irqrestore(&pch->lock, flags); | 206 | spin_unlock_irqrestore(&pch->lock, flags); |
2329 | 207 | ||
2330 | if (pch->cyclic) | 208 | free_desc_list(&list); |
2331 | handle_cyclic_desc_list(&list); | ||
2332 | else | ||
2333 | free_desc_list(&list); | ||
2334 | } | 209 | } |
2335 | 210 | ||
2336 | static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | 211 | static void dma_pl330_rqcb(void *token, enum pl330_op_err err) |
@@ -2352,32 +227,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | |||
2352 | tasklet_schedule(&pch->task); | 227 | tasklet_schedule(&pch->task); |
2353 | } | 228 | } |
2354 | 229 | ||
2355 | bool pl330_filter(struct dma_chan *chan, void *param) | ||
2356 | { | ||
2357 | u8 *peri_id; | ||
2358 | |||
2359 | if (chan->device->dev->driver != &pl330_driver.drv) | ||
2360 | return false; | ||
2361 | |||
2362 | #ifdef CONFIG_OF | ||
2363 | if (chan->device->dev->of_node) { | ||
2364 | const __be32 *prop_value; | ||
2365 | phandle phandle; | ||
2366 | struct device_node *node; | ||
2367 | |||
2368 | prop_value = ((struct property *)param)->value; | ||
2369 | phandle = be32_to_cpup(prop_value++); | ||
2370 | node = of_find_node_by_phandle(phandle); | ||
2371 | return ((chan->private == node) && | ||
2372 | (chan->chan_id == be32_to_cpup(prop_value))); | ||
2373 | } | ||
2374 | #endif | ||
2375 | |||
2376 | peri_id = chan->private; | ||
2377 | return *peri_id == (unsigned)param; | ||
2378 | } | ||
2379 | EXPORT_SYMBOL(pl330_filter); | ||
2380 | |||
2381 | static int pl330_alloc_chan_resources(struct dma_chan *chan) | 230 | static int pl330_alloc_chan_resources(struct dma_chan *chan) |
2382 | { | 231 | { |
2383 | struct dma_pl330_chan *pch = to_pchan(chan); | 232 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -2386,13 +235,12 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2386 | 235 | ||
2387 | spin_lock_irqsave(&pch->lock, flags); | 236 | spin_lock_irqsave(&pch->lock, flags); |
2388 | 237 | ||
2389 | dma_cookie_init(chan); | 238 | pch->completed = chan->cookie = 1; |
2390 | pch->cyclic = false; | ||
2391 | 239 | ||
2392 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | 240 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); |
2393 | if (!pch->pl330_chid) { | 241 | if (!pch->pl330_chid) { |
2394 | spin_unlock_irqrestore(&pch->lock, flags); | 242 | spin_unlock_irqrestore(&pch->lock, flags); |
2395 | return -ENOMEM; | 243 | return 0; |
2396 | } | 244 | } |
2397 | 245 | ||
2398 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); | 246 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); |
@@ -2405,51 +253,25 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2405 | static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) | 253 | static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) |
2406 | { | 254 | { |
2407 | struct dma_pl330_chan *pch = to_pchan(chan); | 255 | struct dma_pl330_chan *pch = to_pchan(chan); |
2408 | struct dma_pl330_desc *desc, *_dt; | 256 | struct dma_pl330_desc *desc; |
2409 | unsigned long flags; | 257 | unsigned long flags; |
2410 | struct dma_pl330_dmac *pdmac = pch->dmac; | ||
2411 | struct dma_slave_config *slave_config; | ||
2412 | LIST_HEAD(list); | ||
2413 | 258 | ||
2414 | switch (cmd) { | 259 | /* Only supports DMA_TERMINATE_ALL */ |
2415 | case DMA_TERMINATE_ALL: | 260 | if (cmd != DMA_TERMINATE_ALL) |
2416 | spin_lock_irqsave(&pch->lock, flags); | 261 | return -ENXIO; |
2417 | 262 | ||
2418 | /* FLUSH the PL330 Channel thread */ | 263 | spin_lock_irqsave(&pch->lock, flags); |
2419 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); | ||
2420 | 264 | ||
2421 | /* Mark all desc done */ | 265 | /* FLUSH the PL330 Channel thread */ |
2422 | list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { | 266 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); |
2423 | desc->status = DONE; | ||
2424 | list_move_tail(&desc->node, &list); | ||
2425 | } | ||
2426 | 267 | ||
2427 | list_splice_tail_init(&list, &pdmac->desc_pool); | 268 | /* Mark all desc done */ |
2428 | spin_unlock_irqrestore(&pch->lock, flags); | 269 | list_for_each_entry(desc, &pch->work_list, node) |
2429 | break; | 270 | desc->status = DONE; |
2430 | case DMA_SLAVE_CONFIG: | 271 | |
2431 | slave_config = (struct dma_slave_config *)arg; | 272 | spin_unlock_irqrestore(&pch->lock, flags); |
2432 | 273 | ||
2433 | if (slave_config->direction == DMA_MEM_TO_DEV) { | 274 | pl330_tasklet((unsigned long) pch); |
2434 | if (slave_config->dst_addr) | ||
2435 | pch->fifo_addr = slave_config->dst_addr; | ||
2436 | if (slave_config->dst_addr_width) | ||
2437 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | ||
2438 | if (slave_config->dst_maxburst) | ||
2439 | pch->burst_len = slave_config->dst_maxburst; | ||
2440 | } else if (slave_config->direction == DMA_DEV_TO_MEM) { | ||
2441 | if (slave_config->src_addr) | ||
2442 | pch->fifo_addr = slave_config->src_addr; | ||
2443 | if (slave_config->src_addr_width) | ||
2444 | pch->burst_sz = __ffs(slave_config->src_addr_width); | ||
2445 | if (slave_config->src_maxburst) | ||
2446 | pch->burst_len = slave_config->src_maxburst; | ||
2447 | } | ||
2448 | break; | ||
2449 | default: | ||
2450 | dev_err(pch->dmac->pif.dev, "Not supported command.\n"); | ||
2451 | return -ENXIO; | ||
2452 | } | ||
2453 | 275 | ||
2454 | return 0; | 276 | return 0; |
2455 | } | 277 | } |
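With this hunk the driver's device_control callback honours only DMA_TERMINATE_ALL and returns -ENXIO for everything else (the DMA_SLAVE_CONFIG handling above, which filled pch->fifo_addr, burst_sz and burst_len, is dropped). For context, a dmaengine client of this era would normally reach it through the generic wrapper; a minimal sketch, not part of the patch, with a hypothetical function name:

	#include <linux/dmaengine.h>

	/*
	 * Hypothetical client helper: dmaengine_terminate_all() ends up in
	 * pl330_control(chan, DMA_TERMINATE_ALL, 0) on this driver.
	 */
	static void example_abort_transfers(struct dma_chan *chan)
	{
		dmaengine_terminate_all(chan);
	}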
@@ -2466,9 +288,6 @@ static void pl330_free_chan_resources(struct dma_chan *chan) | |||
2466 | pl330_release_channel(pch->pl330_chid); | 288 | pl330_release_channel(pch->pl330_chid); |
2467 | pch->pl330_chid = NULL; | 289 | pch->pl330_chid = NULL; |
2468 | 290 | ||
2469 | if (pch->cyclic) | ||
2470 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); | ||
2471 | |||
2472 | spin_unlock_irqrestore(&pch->lock, flags); | 291 | spin_unlock_irqrestore(&pch->lock, flags); |
2473 | } | 292 | } |
2474 | 293 | ||
@@ -2476,7 +295,18 @@ static enum dma_status | |||
2476 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 295 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
2477 | struct dma_tx_state *txstate) | 296 | struct dma_tx_state *txstate) |
2478 | { | 297 | { |
2479 | return dma_cookie_status(chan, cookie, txstate); | 298 | struct dma_pl330_chan *pch = to_pchan(chan); |
299 | dma_cookie_t last_done, last_used; | ||
300 | int ret; | ||
301 | |||
302 | last_done = pch->completed; | ||
303 | last_used = chan->cookie; | ||
304 | |||
305 | ret = dma_async_is_complete(cookie, last_done, last_used); | ||
306 | |||
307 | dma_set_tx_state(txstate, last_done, last_used, 0); | ||
308 | |||
309 | return ret; | ||
2480 | } | 310 | } |
2481 | 311 | ||
2482 | static void pl330_issue_pending(struct dma_chan *chan) | 312 | static void pl330_issue_pending(struct dma_chan *chan) |
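The restored pl330_tx_status() open-codes the bookkeeping that dma_cookie_status() wrapped on the removed side: pch->completed tracks the last finished cookie, chan->cookie the last issued one, and dma_async_is_complete() compares the queried cookie against that window. The client-visible contract is unchanged; a hedged usage sketch (helper name is illustrative):

	#include <linux/dmaengine.h>

	/* Returns true once the transfer identified by @cookie has finished. */
	static bool example_xfer_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		/* dmaengine_tx_status() calls the driver's ->device_tx_status() */
		return dmaengine_tx_status(chan, cookie, &state) == DMA_SUCCESS;
	}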
@@ -2499,16 +329,26 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2499 | spin_lock_irqsave(&pch->lock, flags); | 329 | spin_lock_irqsave(&pch->lock, flags); |
2500 | 330 | ||
2501 | /* Assign cookies to all nodes */ | 331 | /* Assign cookies to all nodes */ |
332 | cookie = tx->chan->cookie; | ||
333 | |||
2502 | while (!list_empty(&last->node)) { | 334 | while (!list_empty(&last->node)) { |
2503 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | 335 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); |
2504 | 336 | ||
2505 | dma_cookie_assign(&desc->txd); | 337 | if (++cookie < 0) |
338 | cookie = 1; | ||
339 | desc->txd.cookie = cookie; | ||
2506 | 340 | ||
2507 | list_move_tail(&desc->node, &pch->work_list); | 341 | list_move_tail(&desc->node, &pch->work_list); |
2508 | } | 342 | } |
2509 | 343 | ||
2510 | cookie = dma_cookie_assign(&last->txd); | 344 | if (++cookie < 0) |
345 | cookie = 1; | ||
346 | last->txd.cookie = cookie; | ||
347 | |||
2511 | list_add_tail(&last->node, &pch->work_list); | 348 | list_add_tail(&last->node, &pch->work_list); |
349 | |||
350 | tx->chan->cookie = cookie; | ||
351 | |||
2512 | spin_unlock_irqrestore(&pch->lock, flags); | 352 | spin_unlock_irqrestore(&pch->lock, flags); |
2513 | 353 | ||
2514 | return cookie; | 354 | return cookie; |
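pl330_tx_submit() now assigns cookies by hand: dma_cookie_t is a signed int and non-positive values are reserved for "not submitted"/error states, so the counter wraps back to 1 after passing INT_MAX. The dma_cookie_assign() helper on the removed side applies effectively the same rule to chan->cookie. A minimal sketch of just that wrap (illustrative name, not driver code):

	#include <linux/dmaengine.h>

	static inline dma_cookie_t example_next_cookie(dma_cookie_t cookie)
	{
		if (++cookie < 0)	/* wrapped to a negative value */
			cookie = 1;	/* skip 0 and negatives, restart at 1 */
		return cookie;
	}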
@@ -2532,7 +372,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc) | |||
2532 | } | 372 | } |
2533 | 373 | ||
2534 | /* Returns the number of descriptors added to the DMAC pool */ | 374 | /* Returns the number of descriptors added to the DMAC pool */ |
2535 | static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) | 375 | int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) |
2536 | { | 376 | { |
2537 | struct dma_pl330_desc *desc; | 377 | struct dma_pl330_desc *desc; |
2538 | unsigned long flags; | 378 | unsigned long flags; |
@@ -2586,7 +426,7 @@ pluck_desc(struct dma_pl330_dmac *pdmac) | |||
2586 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | 426 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) |
2587 | { | 427 | { |
2588 | struct dma_pl330_dmac *pdmac = pch->dmac; | 428 | struct dma_pl330_dmac *pdmac = pch->dmac; |
2589 | u8 *peri_id = pch->chan.private; | 429 | struct dma_pl330_peri *peri = pch->chan.private; |
2590 | struct dma_pl330_desc *desc; | 430 | struct dma_pl330_desc *desc; |
2591 | 431 | ||
2592 | /* Pluck one desc from the pool of DMAC */ | 432 | /* Pluck one desc from the pool of DMAC */ |
@@ -2611,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
2611 | desc->txd.cookie = 0; | 451 | desc->txd.cookie = 0; |
2612 | async_tx_ack(&desc->txd); | 452 | async_tx_ack(&desc->txd); |
2613 | 453 | ||
2614 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; | 454 | if (peri) { |
2615 | desc->rqcfg.pcfg = &pch->dmac->pif.pcfg; | 455 | desc->req.rqtype = peri->rqtype; |
456 | desc->req.peri = peri->peri_id; | ||
457 | } else { | ||
458 | desc->req.rqtype = MEMTOMEM; | ||
459 | desc->req.peri = 0; | ||
460 | } | ||
2616 | 461 | ||
2617 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 462 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
2618 | 463 | ||
@@ -2679,66 +524,22 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |||
2679 | return burst_len; | 524 | return burst_len; |
2680 | } | 525 | } |
2681 | 526 | ||
2682 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | ||
2683 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | ||
2684 | size_t period_len, enum dma_transfer_direction direction, | ||
2685 | unsigned long flags, void *context) | ||
2686 | { | ||
2687 | struct dma_pl330_desc *desc; | ||
2688 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
2689 | dma_addr_t dst; | ||
2690 | dma_addr_t src; | ||
2691 | |||
2692 | desc = pl330_get_desc(pch); | ||
2693 | if (!desc) { | ||
2694 | dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", | ||
2695 | __func__, __LINE__); | ||
2696 | return NULL; | ||
2697 | } | ||
2698 | |||
2699 | switch (direction) { | ||
2700 | case DMA_MEM_TO_DEV: | ||
2701 | desc->rqcfg.src_inc = 1; | ||
2702 | desc->rqcfg.dst_inc = 0; | ||
2703 | desc->req.rqtype = MEMTODEV; | ||
2704 | src = dma_addr; | ||
2705 | dst = pch->fifo_addr; | ||
2706 | break; | ||
2707 | case DMA_DEV_TO_MEM: | ||
2708 | desc->rqcfg.src_inc = 0; | ||
2709 | desc->rqcfg.dst_inc = 1; | ||
2710 | desc->req.rqtype = DEVTOMEM; | ||
2711 | src = pch->fifo_addr; | ||
2712 | dst = dma_addr; | ||
2713 | break; | ||
2714 | default: | ||
2715 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", | ||
2716 | __func__, __LINE__); | ||
2717 | return NULL; | ||
2718 | } | ||
2719 | |||
2720 | desc->rqcfg.brst_size = pch->burst_sz; | ||
2721 | desc->rqcfg.brst_len = 1; | ||
2722 | |||
2723 | pch->cyclic = true; | ||
2724 | |||
2725 | fill_px(&desc->px, dst, src, period_len); | ||
2726 | |||
2727 | return &desc->txd; | ||
2728 | } | ||
2729 | |||
2730 | static struct dma_async_tx_descriptor * | 527 | static struct dma_async_tx_descriptor * |
2731 | pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | 528 | pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, |
2732 | dma_addr_t src, size_t len, unsigned long flags) | 529 | dma_addr_t src, size_t len, unsigned long flags) |
2733 | { | 530 | { |
2734 | struct dma_pl330_desc *desc; | 531 | struct dma_pl330_desc *desc; |
2735 | struct dma_pl330_chan *pch = to_pchan(chan); | 532 | struct dma_pl330_chan *pch = to_pchan(chan); |
533 | struct dma_pl330_peri *peri = chan->private; | ||
2736 | struct pl330_info *pi; | 534 | struct pl330_info *pi; |
2737 | int burst; | 535 | int burst; |
2738 | 536 | ||
2739 | if (unlikely(!pch || !len)) | 537 | if (unlikely(!pch || !len)) |
2740 | return NULL; | 538 | return NULL; |
2741 | 539 | ||
540 | if (peri && peri->rqtype != MEMTOMEM) | ||
541 | return NULL; | ||
542 | |||
2742 | pi = &pch->dmac->pif; | 543 | pi = &pch->dmac->pif; |
2743 | 544 | ||
2744 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); | 545 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); |
@@ -2747,7 +548,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
2747 | 548 | ||
2748 | desc->rqcfg.src_inc = 1; | 549 | desc->rqcfg.src_inc = 1; |
2749 | desc->rqcfg.dst_inc = 1; | 550 | desc->rqcfg.dst_inc = 1; |
2750 | desc->req.rqtype = MEMTOMEM; | ||
2751 | 551 | ||
2752 | /* Select max possible burst size */ | 552 | /* Select max possible burst size */ |
2753 | burst = pi->pcfg.data_bus_width / 8; | 553 | burst = pi->pcfg.data_bus_width / 8; |
@@ -2771,20 +571,32 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
2771 | 571 | ||
2772 | static struct dma_async_tx_descriptor * | 572 | static struct dma_async_tx_descriptor * |
2773 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 573 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
2774 | unsigned int sg_len, enum dma_transfer_direction direction, | 574 | unsigned int sg_len, enum dma_data_direction direction, |
2775 | unsigned long flg, void *context) | 575 | unsigned long flg) |
2776 | { | 576 | { |
2777 | struct dma_pl330_desc *first, *desc = NULL; | 577 | struct dma_pl330_desc *first, *desc = NULL; |
2778 | struct dma_pl330_chan *pch = to_pchan(chan); | 578 | struct dma_pl330_chan *pch = to_pchan(chan); |
579 | struct dma_pl330_peri *peri = chan->private; | ||
2779 | struct scatterlist *sg; | 580 | struct scatterlist *sg; |
2780 | unsigned long flags; | 581 | unsigned long flags; |
2781 | int i; | 582 | int i, burst_size; |
2782 | dma_addr_t addr; | 583 | dma_addr_t addr; |
2783 | 584 | ||
2784 | if (unlikely(!pch || !sgl || !sg_len)) | 585 | if (unlikely(!pch || !sgl || !sg_len || !peri)) |
586 | return NULL; | ||
587 | |||
588 | /* Make sure the direction is consistent */ | ||
589 | if ((direction == DMA_TO_DEVICE && | ||
590 | peri->rqtype != MEMTODEV) || | ||
591 | (direction == DMA_FROM_DEVICE && | ||
592 | peri->rqtype != DEVTOMEM)) { | ||
593 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n", | ||
594 | __func__, __LINE__); | ||
2785 | return NULL; | 595 | return NULL; |
596 | } | ||
2786 | 597 | ||
2787 | addr = pch->fifo_addr; | 598 | addr = peri->fifo_addr; |
599 | burst_size = peri->burst_sz; | ||
2788 | 600 | ||
2789 | first = NULL; | 601 | first = NULL; |
2790 | 602 | ||
@@ -2820,21 +632,19 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
2820 | else | 632 | else |
2821 | list_add_tail(&desc->node, &first->node); | 633 | list_add_tail(&desc->node, &first->node); |
2822 | 634 | ||
2823 | if (direction == DMA_MEM_TO_DEV) { | 635 | if (direction == DMA_TO_DEVICE) { |
2824 | desc->rqcfg.src_inc = 1; | 636 | desc->rqcfg.src_inc = 1; |
2825 | desc->rqcfg.dst_inc = 0; | 637 | desc->rqcfg.dst_inc = 0; |
2826 | desc->req.rqtype = MEMTODEV; | ||
2827 | fill_px(&desc->px, | 638 | fill_px(&desc->px, |
2828 | addr, sg_dma_address(sg), sg_dma_len(sg)); | 639 | addr, sg_dma_address(sg), sg_dma_len(sg)); |
2829 | } else { | 640 | } else { |
2830 | desc->rqcfg.src_inc = 0; | 641 | desc->rqcfg.src_inc = 0; |
2831 | desc->rqcfg.dst_inc = 1; | 642 | desc->rqcfg.dst_inc = 1; |
2832 | desc->req.rqtype = DEVTOMEM; | ||
2833 | fill_px(&desc->px, | 643 | fill_px(&desc->px, |
2834 | sg_dma_address(sg), addr, sg_dma_len(sg)); | 644 | sg_dma_address(sg), addr, sg_dma_len(sg)); |
2835 | } | 645 | } |
2836 | 646 | ||
2837 | desc->rqcfg.brst_size = pch->burst_sz; | 647 | desc->rqcfg.brst_size = burst_size; |
2838 | desc->rqcfg.brst_len = 1; | 648 | desc->rqcfg.brst_len = 1; |
2839 | } | 649 | } |
2840 | 650 | ||
@@ -2851,7 +661,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) | |||
2851 | return IRQ_NONE; | 661 | return IRQ_NONE; |
2852 | } | 662 | } |
2853 | 663 | ||
2854 | static int | 664 | static int __devinit |
2855 | pl330_probe(struct amba_device *adev, const struct amba_id *id) | 665 | pl330_probe(struct amba_device *adev, const struct amba_id *id) |
2856 | { | 666 | { |
2857 | struct dma_pl330_platdata *pdat; | 667 | struct dma_pl330_platdata *pdat; |
@@ -2886,8 +696,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2886 | goto probe_err1; | 696 | goto probe_err1; |
2887 | } | 697 | } |
2888 | 698 | ||
2889 | amba_set_drvdata(adev, pdmac); | ||
2890 | |||
2891 | irq = adev->irq[0]; | 699 | irq = adev->irq[0]; |
2892 | ret = request_irq(irq, pl330_irq_handler, 0, | 700 | ret = request_irq(irq, pl330_irq_handler, 0, |
2893 | dev_name(&adev->dev), pi); | 701 | dev_name(&adev->dev), pi); |
@@ -2909,51 +717,49 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2909 | INIT_LIST_HEAD(&pd->channels); | 717 | INIT_LIST_HEAD(&pd->channels); |
2910 | 718 | ||
2911 | /* Initialize channel parameters */ | 719 | /* Initialize channel parameters */ |
2912 | if (pdat) | 720 | num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan); |
2913 | num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan); | ||
2914 | else | ||
2915 | num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); | ||
2916 | |||
2917 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); | 721 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); |
2918 | if (!pdmac->peripherals) { | ||
2919 | ret = -ENOMEM; | ||
2920 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); | ||
2921 | goto probe_err4; | ||
2922 | } | ||
2923 | 722 | ||
2924 | for (i = 0; i < num_chan; i++) { | 723 | for (i = 0; i < num_chan; i++) { |
2925 | pch = &pdmac->peripherals[i]; | 724 | pch = &pdmac->peripherals[i]; |
2926 | if (!adev->dev.of_node) | 725 | if (pdat) { |
2927 | pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; | 726 | struct dma_pl330_peri *peri = &pdat->peri[i]; |
2928 | else | 727 | |
2929 | pch->chan.private = adev->dev.of_node; | 728 | switch (peri->rqtype) { |
729 | case MEMTOMEM: | ||
730 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
731 | break; | ||
732 | case MEMTODEV: | ||
733 | case DEVTOMEM: | ||
734 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
735 | break; | ||
736 | default: | ||
737 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
738 | continue; | ||
739 | } | ||
740 | pch->chan.private = peri; | ||
741 | } else { | ||
742 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
743 | pch->chan.private = NULL; | ||
744 | } | ||
2930 | 745 | ||
2931 | INIT_LIST_HEAD(&pch->work_list); | 746 | INIT_LIST_HEAD(&pch->work_list); |
2932 | spin_lock_init(&pch->lock); | 747 | spin_lock_init(&pch->lock); |
2933 | pch->pl330_chid = NULL; | 748 | pch->pl330_chid = NULL; |
2934 | pch->chan.device = pd; | 749 | pch->chan.device = pd; |
750 | pch->chan.chan_id = i; | ||
2935 | pch->dmac = pdmac; | 751 | pch->dmac = pdmac; |
2936 | 752 | ||
2937 | /* Add the channel to the DMAC list */ | 753 | /* Add the channel to the DMAC list */ |
754 | pd->chancnt++; | ||
2938 | list_add_tail(&pch->chan.device_node, &pd->channels); | 755 | list_add_tail(&pch->chan.device_node, &pd->channels); |
2939 | } | 756 | } |
2940 | 757 | ||
2941 | pd->dev = &adev->dev; | 758 | pd->dev = &adev->dev; |
2942 | if (pdat) { | ||
2943 | pd->cap_mask = pdat->cap_mask; | ||
2944 | } else { | ||
2945 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
2946 | if (pi->pcfg.num_peri) { | ||
2947 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
2948 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); | ||
2949 | dma_cap_set(DMA_PRIVATE, pd->cap_mask); | ||
2950 | } | ||
2951 | } | ||
2952 | 759 | ||
2953 | pd->device_alloc_chan_resources = pl330_alloc_chan_resources; | 760 | pd->device_alloc_chan_resources = pl330_alloc_chan_resources; |
2954 | pd->device_free_chan_resources = pl330_free_chan_resources; | 761 | pd->device_free_chan_resources = pl330_free_chan_resources; |
2955 | pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; | 762 | pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; |
2956 | pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; | ||
2957 | pd->device_tx_status = pl330_tx_status; | 763 | pd->device_tx_status = pl330_tx_status; |
2958 | pd->device_prep_slave_sg = pl330_prep_slave_sg; | 764 | pd->device_prep_slave_sg = pl330_prep_slave_sg; |
2959 | pd->device_control = pl330_control; | 765 | pd->device_control = pl330_control; |
@@ -2965,6 +771,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2965 | goto probe_err4; | 771 | goto probe_err4; |
2966 | } | 772 | } |
2967 | 773 | ||
774 | amba_set_drvdata(adev, pdmac); | ||
775 | |||
2968 | dev_info(&adev->dev, | 776 | dev_info(&adev->dev, |
2969 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); | 777 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); |
2970 | dev_info(&adev->dev, | 778 | dev_info(&adev->dev, |
@@ -2988,7 +796,7 @@ probe_err1: | |||
2988 | return ret; | 796 | return ret; |
2989 | } | 797 | } |
2990 | 798 | ||
2991 | static int pl330_remove(struct amba_device *adev) | 799 | static int __devexit pl330_remove(struct amba_device *adev) |
2992 | { | 800 | { |
2993 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); | 801 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); |
2994 | struct dma_pl330_chan *pch, *_p; | 802 | struct dma_pl330_chan *pch, *_p; |
@@ -3038,8 +846,6 @@ static struct amba_id pl330_ids[] = { | |||
3038 | { 0, 0 }, | 846 | { 0, 0 }, |
3039 | }; | 847 | }; |
3040 | 848 | ||
3041 | MODULE_DEVICE_TABLE(amba, pl330_ids); | ||
3042 | |||
3043 | static struct amba_driver pl330_driver = { | 849 | static struct amba_driver pl330_driver = { |
3044 | .drv = { | 850 | .drv = { |
3045 | .owner = THIS_MODULE, | 851 | .owner = THIS_MODULE, |
@@ -3050,7 +856,18 @@ static struct amba_driver pl330_driver = { | |||
3050 | .remove = pl330_remove, | 856 | .remove = pl330_remove, |
3051 | }; | 857 | }; |
3052 | 858 | ||
3053 | module_amba_driver(pl330_driver); | 859 | static int __init pl330_init(void) |
860 | { | ||
861 | return amba_driver_register(&pl330_driver); | ||
862 | } | ||
863 | module_init(pl330_init); | ||
864 | |||
865 | static void __exit pl330_exit(void) | ||
866 | { | ||
867 | amba_driver_unregister(&pl330_driver); | ||
868 | return; | ||
869 | } | ||
870 | module_exit(pl330_exit); | ||
3054 | 871 | ||
3055 | MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); | 872 | MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); |
3056 | MODULE_DESCRIPTION("API Driver for PL330 DMAC"); | 873 | MODULE_DESCRIPTION("API Driver for PL330 DMAC"); |
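The module_amba_driver() one-liner dropped here is only a convenience macro; in this era's include/linux/amba/bus.h it expands, via module_driver(), to roughly the register/unregister pair the patch reinstates above:

	/* Approximate expansion of module_amba_driver(pl330_driver): */
	static int __init pl330_driver_init(void)
	{
		return amba_driver_register(&pl330_driver);
	}
	module_init(pl330_driver_init);

	static void __exit pl330_driver_exit(void)
	{
		amba_driver_unregister(&pl330_driver);
	}
	module_exit(pl330_driver_exit);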
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 5d3d95569a1..fc457a7e883 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <asm/dcr.h> | 46 | #include <asm/dcr.h> |
47 | #include <asm/dcr-regs.h> | 47 | #include <asm/dcr-regs.h> |
48 | #include "adma.h" | 48 | #include "adma.h" |
49 | #include "../dmaengine.h" | ||
50 | 49 | ||
51 | enum ppc_adma_init_code { | 50 | enum ppc_adma_init_code { |
52 | PPC_ADMA_INIT_OK = 0, | 51 | PPC_ADMA_INIT_OK = 0, |
@@ -1931,7 +1930,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) | |||
1931 | if (end_of_chain && slot_cnt) { | 1930 | if (end_of_chain && slot_cnt) { |
1932 | /* Should wait for ZeroSum completion */ | 1931 | /* Should wait for ZeroSum completion */ |
1933 | if (cookie > 0) | 1932 | if (cookie > 0) |
1934 | chan->common.completed_cookie = cookie; | 1933 | chan->completed_cookie = cookie; |
1935 | return; | 1934 | return; |
1936 | } | 1935 | } |
1937 | 1936 | ||
@@ -1961,7 +1960,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) | |||
1961 | BUG_ON(!seen_current); | 1960 | BUG_ON(!seen_current); |
1962 | 1961 | ||
1963 | if (cookie > 0) { | 1962 | if (cookie > 0) { |
1964 | chan->common.completed_cookie = cookie; | 1963 | chan->completed_cookie = cookie; |
1965 | pr_debug("\tcompleted cookie %d\n", cookie); | 1964 | pr_debug("\tcompleted cookie %d\n", cookie); |
1966 | } | 1965 | } |
1967 | 1966 | ||
@@ -2151,6 +2150,22 @@ static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan) | |||
2151 | } | 2150 | } |
2152 | 2151 | ||
2153 | /** | 2152 | /** |
2153 | * ppc440spe_desc_assign_cookie - assign a cookie | ||
2154 | */ | ||
2155 | static dma_cookie_t ppc440spe_desc_assign_cookie( | ||
2156 | struct ppc440spe_adma_chan *chan, | ||
2157 | struct ppc440spe_adma_desc_slot *desc) | ||
2158 | { | ||
2159 | dma_cookie_t cookie = chan->common.cookie; | ||
2160 | |||
2161 | cookie++; | ||
2162 | if (cookie < 0) | ||
2163 | cookie = 1; | ||
2164 | chan->common.cookie = desc->async_tx.cookie = cookie; | ||
2165 | return cookie; | ||
2166 | } | ||
2167 | |||
2168 | /** | ||
2154 | * ppc440spe_rxor_set_region_data - | 2169 | * ppc440spe_rxor_set_region_data - |
2155 | */ | 2170 | */ |
2156 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, | 2171 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, |
@@ -2220,7 +2235,8 @@ static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2220 | slots_per_op = group_start->slots_per_op; | 2235 | slots_per_op = group_start->slots_per_op; |
2221 | 2236 | ||
2222 | spin_lock_bh(&chan->lock); | 2237 | spin_lock_bh(&chan->lock); |
2223 | cookie = dma_cookie_assign(tx); | 2238 | |
2239 | cookie = ppc440spe_desc_assign_cookie(chan, sw_desc); | ||
2224 | 2240 | ||
2225 | if (unlikely(list_empty(&chan->chain))) { | 2241 | if (unlikely(list_empty(&chan->chain))) { |
2226 | /* first peer */ | 2242 | /* first peer */ |
@@ -3928,16 +3944,28 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, | |||
3928 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 3944 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
3929 | { | 3945 | { |
3930 | struct ppc440spe_adma_chan *ppc440spe_chan; | 3946 | struct ppc440spe_adma_chan *ppc440spe_chan; |
3947 | dma_cookie_t last_used; | ||
3948 | dma_cookie_t last_complete; | ||
3931 | enum dma_status ret; | 3949 | enum dma_status ret; |
3932 | 3950 | ||
3933 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | 3951 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); |
3934 | ret = dma_cookie_status(chan, cookie, txstate); | 3952 | last_used = chan->cookie; |
3953 | last_complete = ppc440spe_chan->completed_cookie; | ||
3954 | |||
3955 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
3956 | |||
3957 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
3935 | if (ret == DMA_SUCCESS) | 3958 | if (ret == DMA_SUCCESS) |
3936 | return ret; | 3959 | return ret; |
3937 | 3960 | ||
3938 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); | 3961 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); |
3939 | 3962 | ||
3940 | return dma_cookie_status(chan, cookie, txstate); | 3963 | last_used = chan->cookie; |
3964 | last_complete = ppc440spe_chan->completed_cookie; | ||
3965 | |||
3966 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
3967 | |||
3968 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
3941 | } | 3969 | } |
3942 | 3970 | ||
3943 | /** | 3971 | /** |
@@ -4022,12 +4050,16 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan) | |||
4022 | async_tx_ack(&sw_desc->async_tx); | 4050 | async_tx_ack(&sw_desc->async_tx); |
4023 | ppc440spe_desc_init_null_xor(group_start); | 4051 | ppc440spe_desc_init_null_xor(group_start); |
4024 | 4052 | ||
4025 | cookie = dma_cookie_assign(&sw_desc->async_tx); | 4053 | cookie = chan->common.cookie; |
4054 | cookie++; | ||
4055 | if (cookie <= 1) | ||
4056 | cookie = 2; | ||
4026 | 4057 | ||
4027 | /* initialize the completed cookie to be less than | 4058 | /* initialize the completed cookie to be less than |
4028 | * the most recently used cookie | 4059 | * the most recently used cookie |
4029 | */ | 4060 | */ |
4030 | chan->common.completed_cookie = cookie - 1; | 4061 | chan->completed_cookie = cookie - 1; |
4062 | chan->common.cookie = sw_desc->async_tx.cookie = cookie; | ||
4031 | 4063 | ||
4032 | /* channel should not be busy */ | 4064 | /* channel should not be busy */ |
4033 | BUG_ON(ppc440spe_chan_is_busy(chan)); | 4065 | BUG_ON(ppc440spe_chan_is_busy(chan)); |
@@ -4361,7 +4393,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev, | |||
4361 | /** | 4393 | /** |
4362 | * ppc440spe_adma_probe - probe the asynch device | 4394 | * ppc440spe_adma_probe - probe the asynch device |
4363 | */ | 4395 | */ |
4364 | static int ppc440spe_adma_probe(struct platform_device *ofdev) | 4396 | static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev) |
4365 | { | 4397 | { |
4366 | struct device_node *np = ofdev->dev.of_node; | 4398 | struct device_node *np = ofdev->dev.of_node; |
4367 | struct resource res; | 4399 | struct resource res; |
@@ -4446,7 +4478,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) | |||
4446 | ret = -ENOMEM; | 4478 | ret = -ENOMEM; |
4447 | goto err_dma_alloc; | 4479 | goto err_dma_alloc; |
4448 | } | 4480 | } |
4449 | dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n", | 4481 | dev_dbg(&ofdev->dev, "allocted descriptor pool virt 0x%p phys 0x%llx\n", |
4450 | adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool); | 4482 | adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool); |
4451 | 4483 | ||
4452 | regs = ioremap(res.start, resource_size(&res)); | 4484 | regs = ioremap(res.start, resource_size(&res)); |
@@ -4497,7 +4529,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) | |||
4497 | INIT_LIST_HEAD(&chan->all_slots); | 4529 | INIT_LIST_HEAD(&chan->all_slots); |
4498 | chan->device = adev; | 4530 | chan->device = adev; |
4499 | chan->common.device = &adev->common; | 4531 | chan->common.device = &adev->common; |
4500 | dma_cookie_init(&chan->common); | ||
4501 | list_add_tail(&chan->common.device_node, &adev->common.channels); | 4532 | list_add_tail(&chan->common.device_node, &adev->common.channels); |
4502 | tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, | 4533 | tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, |
4503 | (unsigned long)chan); | 4534 | (unsigned long)chan); |
@@ -4592,7 +4623,7 @@ out: | |||
4592 | /** | 4623 | /** |
4593 | * ppc440spe_adma_remove - remove the asynch device | 4624 | * ppc440spe_adma_remove - remove the asynch device |
4594 | */ | 4625 | */ |
4595 | static int ppc440spe_adma_remove(struct platform_device *ofdev) | 4626 | static int __devexit ppc440spe_adma_remove(struct platform_device *ofdev) |
4596 | { | 4627 | { |
4597 | struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); | 4628 | struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); |
4598 | struct device_node *np = ofdev->dev.of_node; | 4629 | struct device_node *np = ofdev->dev.of_node; |
@@ -4905,7 +4936,7 @@ out_free: | |||
4905 | return ret; | 4936 | return ret; |
4906 | } | 4937 | } |
4907 | 4938 | ||
4908 | static const struct of_device_id ppc440spe_adma_of_match[] = { | 4939 | static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = { |
4909 | { .compatible = "ibm,dma-440spe", }, | 4940 | { .compatible = "ibm,dma-440spe", }, |
4910 | { .compatible = "amcc,xor-accelerator", }, | 4941 | { .compatible = "amcc,xor-accelerator", }, |
4911 | {}, | 4942 | {}, |
@@ -4914,7 +4945,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); | |||
4914 | 4945 | ||
4915 | static struct platform_driver ppc440spe_adma_driver = { | 4946 | static struct platform_driver ppc440spe_adma_driver = { |
4916 | .probe = ppc440spe_adma_probe, | 4947 | .probe = ppc440spe_adma_probe, |
4917 | .remove = ppc440spe_adma_remove, | 4948 | .remove = __devexit_p(ppc440spe_adma_remove), |
4918 | .driver = { | 4949 | .driver = { |
4919 | .name = "PPC440SP(E)-ADMA", | 4950 | .name = "PPC440SP(E)-ADMA", |
4920 | .owner = THIS_MODULE, | 4951 | .owner = THIS_MODULE, |
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h index 26b7a5ed9ac..8ada5a812e3 100644 --- a/drivers/dma/ppc4xx/adma.h +++ b/drivers/dma/ppc4xx/adma.h | |||
@@ -81,6 +81,7 @@ struct ppc440spe_adma_device { | |||
81 | * @common: common dmaengine channel object members | 81 | * @common: common dmaengine channel object members |
82 | * @all_slots: complete domain of slots usable by the channel | 82 | * @all_slots: complete domain of slots usable by the channel |
83 | * @pending: allows batching of hardware operations | 83 | * @pending: allows batching of hardware operations |
84 | * @completed_cookie: identifier for the most recently completed operation | ||
84 | * @slots_allocated: records the actual size of the descriptor slot pool | 85 | * @slots_allocated: records the actual size of the descriptor slot pool |
85 | * @hw_chain_inited: h/w descriptor chain initialization flag | 86 | * @hw_chain_inited: h/w descriptor chain initialization flag |
86 | * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs | 87 | * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs |
@@ -98,6 +99,7 @@ struct ppc440spe_adma_chan { | |||
98 | struct list_head all_slots; | 99 | struct list_head all_slots; |
99 | struct ppc440spe_adma_desc_slot *last_used; | 100 | struct ppc440spe_adma_desc_slot *last_used; |
100 | int pending; | 101 | int pending; |
102 | dma_cookie_t completed_cookie; | ||
101 | int slots_allocated; | 103 | int slots_allocated; |
102 | int hw_chain_inited; | 104 | int hw_chain_inited; |
103 | struct tasklet_struct irq_tasklet; | 105 | struct tasklet_struct irq_tasklet; |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c deleted file mode 100644 index 461a91ab70b..00000000000 --- a/drivers/dma/sa11x0-dma.c +++ /dev/null | |||
@@ -1,1105 +0,0 @@ | |||
1 | /* | ||
2 | * SA11x0 DMAengine support | ||
3 | * | ||
4 | * Copyright (C) 2012 Russell King | ||
5 | * Derived in part from arch/arm/mach-sa1100/dma.c, | ||
6 | * Copyright (C) 2000, 2001 by Nicolas Pitre | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/sa11x0-dma.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | |||
24 | #include "virt-dma.h" | ||
25 | |||
26 | #define NR_PHY_CHAN 6 | ||
27 | #define DMA_ALIGN 3 | ||
28 | #define DMA_MAX_SIZE 0x1fff | ||
29 | #define DMA_CHUNK_SIZE 0x1000 | ||
30 | |||
31 | #define DMA_DDAR 0x00 | ||
32 | #define DMA_DCSR_S 0x04 | ||
33 | #define DMA_DCSR_C 0x08 | ||
34 | #define DMA_DCSR_R 0x0c | ||
35 | #define DMA_DBSA 0x10 | ||
36 | #define DMA_DBTA 0x14 | ||
37 | #define DMA_DBSB 0x18 | ||
38 | #define DMA_DBTB 0x1c | ||
39 | #define DMA_SIZE 0x20 | ||
40 | |||
41 | #define DCSR_RUN (1 << 0) | ||
42 | #define DCSR_IE (1 << 1) | ||
43 | #define DCSR_ERROR (1 << 2) | ||
44 | #define DCSR_DONEA (1 << 3) | ||
45 | #define DCSR_STRTA (1 << 4) | ||
46 | #define DCSR_DONEB (1 << 5) | ||
47 | #define DCSR_STRTB (1 << 6) | ||
48 | #define DCSR_BIU (1 << 7) | ||
49 | |||
50 | #define DDAR_RW (1 << 0) /* 0 = W, 1 = R */ | ||
51 | #define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */ | ||
52 | #define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */ | ||
53 | #define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */ | ||
54 | #define DDAR_Ser0UDCTr (0x0 << 4) | ||
55 | #define DDAR_Ser0UDCRc (0x1 << 4) | ||
56 | #define DDAR_Ser1SDLCTr (0x2 << 4) | ||
57 | #define DDAR_Ser1SDLCRc (0x3 << 4) | ||
58 | #define DDAR_Ser1UARTTr (0x4 << 4) | ||
59 | #define DDAR_Ser1UARTRc (0x5 << 4) | ||
60 | #define DDAR_Ser2ICPTr (0x6 << 4) | ||
61 | #define DDAR_Ser2ICPRc (0x7 << 4) | ||
62 | #define DDAR_Ser3UARTTr (0x8 << 4) | ||
63 | #define DDAR_Ser3UARTRc (0x9 << 4) | ||
64 | #define DDAR_Ser4MCP0Tr (0xa << 4) | ||
65 | #define DDAR_Ser4MCP0Rc (0xb << 4) | ||
66 | #define DDAR_Ser4MCP1Tr (0xc << 4) | ||
67 | #define DDAR_Ser4MCP1Rc (0xd << 4) | ||
68 | #define DDAR_Ser4SSPTr (0xe << 4) | ||
69 | #define DDAR_Ser4SSPRc (0xf << 4) | ||
70 | |||
71 | struct sa11x0_dma_sg { | ||
72 | u32 addr; | ||
73 | u32 len; | ||
74 | }; | ||
75 | |||
76 | struct sa11x0_dma_desc { | ||
77 | struct virt_dma_desc vd; | ||
78 | |||
79 | u32 ddar; | ||
80 | size_t size; | ||
81 | unsigned period; | ||
82 | bool cyclic; | ||
83 | |||
84 | unsigned sglen; | ||
85 | struct sa11x0_dma_sg sg[0]; | ||
86 | }; | ||
87 | |||
88 | struct sa11x0_dma_phy; | ||
89 | |||
90 | struct sa11x0_dma_chan { | ||
91 | struct virt_dma_chan vc; | ||
92 | |||
93 | /* protected by c->vc.lock */ | ||
94 | struct sa11x0_dma_phy *phy; | ||
95 | enum dma_status status; | ||
96 | |||
97 | /* protected by d->lock */ | ||
98 | struct list_head node; | ||
99 | |||
100 | u32 ddar; | ||
101 | const char *name; | ||
102 | }; | ||
103 | |||
104 | struct sa11x0_dma_phy { | ||
105 | void __iomem *base; | ||
106 | struct sa11x0_dma_dev *dev; | ||
107 | unsigned num; | ||
108 | |||
109 | struct sa11x0_dma_chan *vchan; | ||
110 | |||
111 | /* Protected by c->vc.lock */ | ||
112 | unsigned sg_load; | ||
113 | struct sa11x0_dma_desc *txd_load; | ||
114 | unsigned sg_done; | ||
115 | struct sa11x0_dma_desc *txd_done; | ||
116 | #ifdef CONFIG_PM_SLEEP | ||
117 | u32 dbs[2]; | ||
118 | u32 dbt[2]; | ||
119 | u32 dcsr; | ||
120 | #endif | ||
121 | }; | ||
122 | |||
123 | struct sa11x0_dma_dev { | ||
124 | struct dma_device slave; | ||
125 | void __iomem *base; | ||
126 | spinlock_t lock; | ||
127 | struct tasklet_struct task; | ||
128 | struct list_head chan_pending; | ||
129 | struct sa11x0_dma_phy phy[NR_PHY_CHAN]; | ||
130 | }; | ||
131 | |||
132 | static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) | ||
133 | { | ||
134 | return container_of(chan, struct sa11x0_dma_chan, vc.chan); | ||
135 | } | ||
136 | |||
137 | static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) | ||
138 | { | ||
139 | return container_of(dmadev, struct sa11x0_dma_dev, slave); | ||
140 | } | ||
141 | |||
142 | static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) | ||
143 | { | ||
144 | struct virt_dma_desc *vd = vchan_next_desc(&c->vc); | ||
145 | |||
146 | return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; | ||
147 | } | ||
148 | |||
149 | static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) | ||
150 | { | ||
151 | kfree(container_of(vd, struct sa11x0_dma_desc, vd)); | ||
152 | } | ||
153 | |||
154 | static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) | ||
155 | { | ||
156 | list_del(&txd->vd.node); | ||
157 | p->txd_load = txd; | ||
158 | p->sg_load = 0; | ||
159 | |||
160 | dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", | ||
161 | p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); | ||
162 | } | ||
163 | |||
164 | static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, | ||
165 | struct sa11x0_dma_chan *c) | ||
166 | { | ||
167 | struct sa11x0_dma_desc *txd = p->txd_load; | ||
168 | struct sa11x0_dma_sg *sg; | ||
169 | void __iomem *base = p->base; | ||
170 | unsigned dbsx, dbtx; | ||
171 | u32 dcsr; | ||
172 | |||
173 | if (!txd) | ||
174 | return; | ||
175 | |||
176 | dcsr = readl_relaxed(base + DMA_DCSR_R); | ||
177 | |||
178 | /* Don't try to load the next transfer if both buffers are started */ | ||
179 | if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) | ||
180 | return; | ||
181 | |||
182 | if (p->sg_load == txd->sglen) { | ||
183 | if (!txd->cyclic) { | ||
184 | struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); | ||
185 | |||
186 | /* | ||
187 | * We have reached the end of the current descriptor. | ||
188 | * Peek at the next descriptor, and if compatible with | ||
189 | * the current, start processing it. | ||
190 | */ | ||
191 | if (txn && txn->ddar == txd->ddar) { | ||
192 | txd = txn; | ||
193 | sa11x0_dma_start_desc(p, txn); | ||
194 | } else { | ||
195 | p->txd_load = NULL; | ||
196 | return; | ||
197 | } | ||
198 | } else { | ||
199 | /* Cyclic: reset back to beginning */ | ||
200 | p->sg_load = 0; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | sg = &txd->sg[p->sg_load++]; | ||
205 | |||
206 | /* Select buffer to load according to channel status */ | ||
207 | if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) || | ||
208 | ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) { | ||
209 | dbsx = DMA_DBSA; | ||
210 | dbtx = DMA_DBTA; | ||
211 | dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN; | ||
212 | } else { | ||
213 | dbsx = DMA_DBSB; | ||
214 | dbtx = DMA_DBTB; | ||
215 | dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN; | ||
216 | } | ||
217 | |||
218 | writel_relaxed(sg->addr, base + dbsx); | ||
219 | writel_relaxed(sg->len, base + dbtx); | ||
220 | writel(dcsr, base + DMA_DCSR_S); | ||
221 | |||
222 | dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n", | ||
223 | p->num, dcsr, | ||
224 | 'A' + (dbsx == DMA_DBSB), sg->addr, | ||
225 | 'A' + (dbtx == DMA_DBTB), sg->len); | ||
226 | } | ||
227 | |||
228 | static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, | ||
229 | struct sa11x0_dma_chan *c) | ||
230 | { | ||
231 | struct sa11x0_dma_desc *txd = p->txd_done; | ||
232 | |||
233 | if (++p->sg_done == txd->sglen) { | ||
234 | if (!txd->cyclic) { | ||
235 | vchan_cookie_complete(&txd->vd); | ||
236 | |||
237 | p->sg_done = 0; | ||
238 | p->txd_done = p->txd_load; | ||
239 | |||
240 | if (!p->txd_done) | ||
241 | tasklet_schedule(&p->dev->task); | ||
242 | } else { | ||
243 | if ((p->sg_done % txd->period) == 0) | ||
244 | vchan_cyclic_callback(&txd->vd); | ||
245 | |||
246 | /* Cyclic: reset back to beginning */ | ||
247 | p->sg_done = 0; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | sa11x0_dma_start_sg(p, c); | ||
252 | } | ||
253 | |||
254 | static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) | ||
255 | { | ||
256 | struct sa11x0_dma_phy *p = dev_id; | ||
257 | struct sa11x0_dma_dev *d = p->dev; | ||
258 | struct sa11x0_dma_chan *c; | ||
259 | u32 dcsr; | ||
260 | |||
261 | dcsr = readl_relaxed(p->base + DMA_DCSR_R); | ||
262 | if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB))) | ||
263 | return IRQ_NONE; | ||
264 | |||
265 | /* Clear reported status bits */ | ||
266 | writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB), | ||
267 | p->base + DMA_DCSR_C); | ||
268 | |||
269 | dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr); | ||
270 | |||
271 | if (dcsr & DCSR_ERROR) { | ||
272 | dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n", | ||
273 | p->num, dcsr, | ||
274 | readl_relaxed(p->base + DMA_DDAR), | ||
275 | readl_relaxed(p->base + DMA_DBSA), | ||
276 | readl_relaxed(p->base + DMA_DBTA), | ||
277 | readl_relaxed(p->base + DMA_DBSB), | ||
278 | readl_relaxed(p->base + DMA_DBTB)); | ||
279 | } | ||
280 | |||
281 | c = p->vchan; | ||
282 | if (c) { | ||
283 | unsigned long flags; | ||
284 | |||
285 | spin_lock_irqsave(&c->vc.lock, flags); | ||
286 | /* | ||
287 | * Now that we're holding the lock, check that the vchan | ||
288 | * really is associated with this pchan before touching the | ||
289 | * hardware. This should always succeed, because we won't | ||
290 | * change p->vchan or c->phy while the channel is actively | ||
291 | * transferring. | ||
292 | */ | ||
293 | if (c->phy == p) { | ||
294 | if (dcsr & DCSR_DONEA) | ||
295 | sa11x0_dma_complete(p, c); | ||
296 | if (dcsr & DCSR_DONEB) | ||
297 | sa11x0_dma_complete(p, c); | ||
298 | } | ||
299 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
300 | } | ||
301 | |||
302 | return IRQ_HANDLED; | ||
303 | } | ||
304 | |||
305 | static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c) | ||
306 | { | ||
307 | struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c); | ||
308 | |||
309 | /* If the issued list is empty, we have no further txds to process */ | ||
310 | if (txd) { | ||
311 | struct sa11x0_dma_phy *p = c->phy; | ||
312 | |||
313 | sa11x0_dma_start_desc(p, txd); | ||
314 | p->txd_done = txd; | ||
315 | p->sg_done = 0; | ||
316 | |||
317 | /* The channel should not have any transfers started */ | ||
318 | WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) & | ||
319 | (DCSR_STRTA | DCSR_STRTB)); | ||
320 | |||
321 | /* Clear the run and start bits before changing DDAR */ | ||
322 | writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB, | ||
323 | p->base + DMA_DCSR_C); | ||
324 | writel_relaxed(txd->ddar, p->base + DMA_DDAR); | ||
325 | |||
326 | /* Try to start both buffers */ | ||
327 | sa11x0_dma_start_sg(p, c); | ||
328 | sa11x0_dma_start_sg(p, c); | ||
329 | } | ||
330 | } | ||
331 | |||
332 | static void sa11x0_dma_tasklet(unsigned long arg) | ||
333 | { | ||
334 | struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; | ||
335 | struct sa11x0_dma_phy *p; | ||
336 | struct sa11x0_dma_chan *c; | ||
337 | unsigned pch, pch_alloc = 0; | ||
338 | |||
339 | dev_dbg(d->slave.dev, "tasklet enter\n"); | ||
340 | |||
341 | list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { | ||
342 | spin_lock_irq(&c->vc.lock); | ||
343 | p = c->phy; | ||
344 | if (p && !p->txd_done) { | ||
345 | sa11x0_dma_start_txd(c); | ||
346 | if (!p->txd_done) { | ||
347 | /* No current txd associated with this channel */ | ||
348 | dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); | ||
349 | |||
350 | /* Mark this channel free */ | ||
351 | c->phy = NULL; | ||
352 | p->vchan = NULL; | ||
353 | } | ||
354 | } | ||
355 | spin_unlock_irq(&c->vc.lock); | ||
356 | } | ||
357 | |||
358 | spin_lock_irq(&d->lock); | ||
359 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { | ||
360 | p = &d->phy[pch]; | ||
361 | |||
362 | if (p->vchan == NULL && !list_empty(&d->chan_pending)) { | ||
363 | c = list_first_entry(&d->chan_pending, | ||
364 | struct sa11x0_dma_chan, node); | ||
365 | list_del_init(&c->node); | ||
366 | |||
367 | pch_alloc |= 1 << pch; | ||
368 | |||
369 | /* Mark this channel allocated */ | ||
370 | p->vchan = c; | ||
371 | |||
372 | dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); | ||
373 | } | ||
374 | } | ||
375 | spin_unlock_irq(&d->lock); | ||
376 | |||
377 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { | ||
378 | if (pch_alloc & (1 << pch)) { | ||
379 | p = &d->phy[pch]; | ||
380 | c = p->vchan; | ||
381 | |||
382 | spin_lock_irq(&c->vc.lock); | ||
383 | c->phy = p; | ||
384 | |||
385 | sa11x0_dma_start_txd(c); | ||
386 | spin_unlock_irq(&c->vc.lock); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | dev_dbg(d->slave.dev, "tasklet exit\n"); | ||
391 | } | ||
392 | |||
393 | |||
394 | static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) | ||
395 | { | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) | ||
400 | { | ||
401 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
402 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | ||
403 | unsigned long flags; | ||
404 | |||
405 | spin_lock_irqsave(&d->lock, flags); | ||
406 | list_del_init(&c->node); | ||
407 | spin_unlock_irqrestore(&d->lock, flags); | ||
408 | |||
409 | vchan_free_chan_resources(&c->vc); | ||
410 | } | ||
411 | |||
412 | static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) | ||
413 | { | ||
414 | unsigned reg; | ||
415 | u32 dcsr; | ||
416 | |||
417 | dcsr = readl_relaxed(p->base + DMA_DCSR_R); | ||
418 | |||
419 | if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA || | ||
420 | (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU) | ||
421 | reg = DMA_DBSA; | ||
422 | else | ||
423 | reg = DMA_DBSB; | ||
424 | |||
425 | return readl_relaxed(p->base + reg); | ||
426 | } | ||
427 | |||
428 | static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, | ||
429 | dma_cookie_t cookie, struct dma_tx_state *state) | ||
430 | { | ||
431 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
432 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | ||
433 | struct sa11x0_dma_phy *p; | ||
434 | struct virt_dma_desc *vd; | ||
435 | unsigned long flags; | ||
436 | enum dma_status ret; | ||
437 | |||
438 | ret = dma_cookie_status(&c->vc.chan, cookie, state); | ||
439 | if (ret == DMA_SUCCESS) | ||
440 | return ret; | ||
441 | |||
442 | if (!state) | ||
443 | return c->status; | ||
444 | |||
445 | spin_lock_irqsave(&c->vc.lock, flags); | ||
446 | p = c->phy; | ||
447 | |||
448 | /* | ||
449 | * If the cookie is on our issue queue, then the residue is | ||
450 | * its total size. | ||
451 | */ | ||
452 | vd = vchan_find_desc(&c->vc, cookie); | ||
453 | if (vd) { | ||
454 | state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; | ||
455 | } else if (!p) { | ||
456 | state->residue = 0; | ||
457 | } else { | ||
458 | struct sa11x0_dma_desc *txd; | ||
459 | size_t bytes = 0; | ||
460 | |||
461 | if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) | ||
462 | txd = p->txd_done; | ||
463 | else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) | ||
464 | txd = p->txd_load; | ||
465 | else | ||
466 | txd = NULL; | ||
467 | |||
468 | ret = c->status; | ||
469 | if (txd) { | ||
470 | dma_addr_t addr = sa11x0_dma_pos(p); | ||
471 | unsigned i; | ||
472 | |||
473 | dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); | ||
474 | |||
475 | for (i = 0; i < txd->sglen; i++) { | ||
476 | dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", | ||
477 | i, txd->sg[i].addr, txd->sg[i].len); | ||
478 | if (addr >= txd->sg[i].addr && | ||
479 | addr < txd->sg[i].addr + txd->sg[i].len) { | ||
480 | unsigned len; | ||
481 | |||
482 | len = txd->sg[i].len - | ||
483 | (addr - txd->sg[i].addr); | ||
484 | dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n", | ||
485 | i, len); | ||
486 | bytes += len; | ||
487 | i++; | ||
488 | break; | ||
489 | } | ||
490 | } | ||
491 | for (; i < txd->sglen; i++) { | ||
492 | dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n", | ||
493 | i, txd->sg[i].addr, txd->sg[i].len); | ||
494 | bytes += txd->sg[i].len; | ||
495 | } | ||
496 | } | ||
497 | state->residue = bytes; | ||
498 | } | ||
499 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
500 | |||
501 | dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); | ||
502 | |||
503 | return ret; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * Move pending txds to the issued list, and re-init pending list. | ||
508 | * If not already pending, add this channel to the list of pending | ||
509 | * channels and trigger the tasklet to run. | ||
510 | */ | ||
511 | static void sa11x0_dma_issue_pending(struct dma_chan *chan) | ||
512 | { | ||
513 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
514 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | ||
515 | unsigned long flags; | ||
516 | |||
517 | spin_lock_irqsave(&c->vc.lock, flags); | ||
518 | if (vchan_issue_pending(&c->vc)) { | ||
519 | if (!c->phy) { | ||
520 | spin_lock(&d->lock); | ||
521 | if (list_empty(&c->node)) { | ||
522 | list_add_tail(&c->node, &d->chan_pending); | ||
523 | tasklet_schedule(&d->task); | ||
524 | dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); | ||
525 | } | ||
526 | spin_unlock(&d->lock); | ||
527 | } | ||
528 | } else | ||
529 | dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); | ||
530 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
531 | } | ||
532 | |||
533 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( | ||
534 | struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, | ||
535 | enum dma_transfer_direction dir, unsigned long flags, void *context) | ||
536 | { | ||
537 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
538 | struct sa11x0_dma_desc *txd; | ||
539 | struct scatterlist *sgent; | ||
540 | unsigned i, j = sglen; | ||
541 | size_t size = 0; | ||
542 | |||
543 | /* SA11x0 channels can only operate in their native direction */ | ||
544 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { | ||
545 | dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", | ||
546 | &c->vc, c->ddar, dir); | ||
547 | return NULL; | ||
548 | } | ||
549 | |||
550 | /* Do not allow zero-sized txds */ | ||
551 | if (sglen == 0) | ||
552 | return NULL; | ||
553 | |||
554 | for_each_sg(sg, sgent, sglen, i) { | ||
555 | dma_addr_t addr = sg_dma_address(sgent); | ||
556 | unsigned int len = sg_dma_len(sgent); | ||
557 | |||
558 | if (len > DMA_MAX_SIZE) | ||
559 | j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; | ||
560 | if (addr & DMA_ALIGN) { | ||
561 | dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", | ||
562 | &c->vc, addr); | ||
563 | return NULL; | ||
564 | } | ||
565 | } | ||
566 | |||
567 | txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); | ||
568 | if (!txd) { | ||
569 | dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); | ||
570 | return NULL; | ||
571 | } | ||
572 | |||
573 | j = 0; | ||
574 | for_each_sg(sg, sgent, sglen, i) { | ||
575 | dma_addr_t addr = sg_dma_address(sgent); | ||
576 | unsigned len = sg_dma_len(sgent); | ||
577 | |||
578 | size += len; | ||
579 | |||
580 | do { | ||
581 | unsigned tlen = len; | ||
582 | |||
583 | /* | ||
584 | * Check whether the transfer will fit. If not, try | ||
585 | * to split the transfer up such that we end up with | ||
586 | * equal chunks - but make sure that we preserve the | ||
587 | * alignment. This avoids small segments. | ||
588 | */ | ||
589 | if (tlen > DMA_MAX_SIZE) { | ||
590 | unsigned mult = DIV_ROUND_UP(tlen, | ||
591 | DMA_MAX_SIZE & ~DMA_ALIGN); | ||
592 | |||
593 | tlen = (tlen / mult) & ~DMA_ALIGN; | ||
594 | } | ||
595 | |||
596 | txd->sg[j].addr = addr; | ||
597 | txd->sg[j].len = tlen; | ||
598 | |||
599 | addr += tlen; | ||
600 | len -= tlen; | ||
601 | j++; | ||
602 | } while (len); | ||
603 | } | ||
604 | |||
605 | txd->ddar = c->ddar; | ||
606 | txd->size = size; | ||
607 | txd->sglen = j; | ||
608 | |||
609 | dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", | ||
610 | &c->vc, &txd->vd, txd->size, txd->sglen); | ||
611 | |||
612 | return vchan_tx_prep(&c->vc, &txd->vd, flags); | ||
613 | } | ||
614 | |||
615 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( | ||
616 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, | ||
617 | enum dma_transfer_direction dir, unsigned long flags, void *context) | ||
618 | { | ||
619 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
620 | struct sa11x0_dma_desc *txd; | ||
621 | unsigned i, j, k, sglen, sgperiod; | ||
622 | |||
623 | /* SA11x0 channels can only operate in their native direction */ | ||
624 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { | ||
625 | dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", | ||
626 | &c->vc, c->ddar, dir); | ||
627 | return NULL; | ||
628 | } | ||
629 | |||
630 | sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); | ||
631 | sglen = size * sgperiod / period; | ||
632 | |||
633 | /* Do not allow zero-sized txds */ | ||
634 | if (sglen == 0) | ||
635 | return NULL; | ||
636 | |||
637 | txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); | ||
638 | if (!txd) { | ||
639 | dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); | ||
640 | return NULL; | ||
641 | } | ||
642 | |||
643 | for (i = k = 0; i < size / period; i++) { | ||
644 | size_t tlen, len = period; | ||
645 | |||
646 | for (j = 0; j < sgperiod; j++, k++) { | ||
647 | tlen = len; | ||
648 | |||
649 | if (tlen > DMA_MAX_SIZE) { | ||
650 | unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); | ||
651 | tlen = (tlen / mult) & ~DMA_ALIGN; | ||
652 | } | ||
653 | |||
654 | txd->sg[k].addr = addr; | ||
655 | txd->sg[k].len = tlen; | ||
656 | addr += tlen; | ||
657 | len -= tlen; | ||
658 | } | ||
659 | |||
660 | WARN_ON(len != 0); | ||
661 | } | ||
662 | |||
663 | WARN_ON(k != sglen); | ||
664 | |||
665 | txd->ddar = c->ddar; | ||
666 | txd->size = size; | ||
667 | txd->sglen = sglen; | ||
668 | txd->cyclic = 1; | ||
669 | txd->period = sgperiod; | ||
670 | |||
671 | return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
672 | } | ||
673 | |||
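The cyclic variant sizes its sg table up front: sgperiod is the number of hardware segments one period needs, and sglen = size * sgperiod / period covers the whole buffer. With, say, a 64 KiB buffer and 16 KiB periods that gives sgperiod = 3 and sglen = 12. A tiny standalone check of that arithmetic (buffer and period sizes are arbitrary):

	#include <stdio.h>

	#define DMA_ALIGN	3
	#define DMA_MAX_SIZE	0x1fff
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int size = 0x10000;	/* whole cyclic buffer */
		unsigned int period = 0x4000;	/* one period */
		unsigned int sgperiod = DIV_ROUND_UP(period,
						DMA_MAX_SIZE & ~DMA_ALIGN);
		unsigned int sglen = size * sgperiod / period;

		printf("sgperiod=%u sglen=%u\n", sgperiod, sglen); /* 3, 12 */
		return 0;
	}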
674 | static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) | ||
675 | { | ||
676 | u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); | ||
677 | dma_addr_t addr; | ||
678 | enum dma_slave_buswidth width; | ||
679 | u32 maxburst; | ||
680 | |||
681 | if (ddar & DDAR_RW) { | ||
682 | addr = cfg->src_addr; | ||
683 | width = cfg->src_addr_width; | ||
684 | maxburst = cfg->src_maxburst; | ||
685 | } else { | ||
686 | addr = cfg->dst_addr; | ||
687 | width = cfg->dst_addr_width; | ||
688 | maxburst = cfg->dst_maxburst; | ||
689 | } | ||
690 | |||
691 | if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE && | ||
692 | width != DMA_SLAVE_BUSWIDTH_2_BYTES) || | ||
693 | (maxburst != 4 && maxburst != 8)) | ||
694 | return -EINVAL; | ||
695 | |||
696 | if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
697 | ddar |= DDAR_DW; | ||
698 | if (maxburst == 8) | ||
699 | ddar |= DDAR_BS; | ||
700 | |||
701 | dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", | ||
702 | &c->vc, addr, width, maxburst); | ||
703 | |||
704 | c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; | ||
705 | |||
706 | return 0; | ||
707 | } | ||
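For context, a hedged sketch of the client side of this hook: the peripheral driver fills a struct dma_slave_config whose buswidth and maxburst must pass the checks above (1 or 2 bytes wide, bursts of 4 or 8). MY_FIFO_PHYS and my_dev are placeholders, not symbols from this driver; the call goes through dmaengine_slave_config(), which reaches sa11x0_dma_control(DMA_SLAVE_CONFIG) further down.

	/* Illustrative receive-side configuration (needs <linux/dmaengine.h>);
	 * "chan" was obtained from dma_request_channel(). */
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= MY_FIFO_PHYS,			/* placeholder FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,	/* only 1 or 2 bytes allowed */
		.src_maxburst	= 8,				/* only 4 or 8 allowed */
	};

	if (dmaengine_slave_config(chan, &cfg))
		dev_err(my_dev, "slave config rejected\n");	/* my_dev: placeholder device */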
708 | |||
709 | static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
710 | unsigned long arg) | ||
711 | { | ||
712 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
713 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | ||
714 | struct sa11x0_dma_phy *p; | ||
715 | LIST_HEAD(head); | ||
716 | unsigned long flags; | ||
717 | int ret; | ||
718 | |||
719 | switch (cmd) { | ||
720 | case DMA_SLAVE_CONFIG: | ||
721 | return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); | ||
722 | |||
723 | case DMA_TERMINATE_ALL: | ||
724 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); | ||
725 | /* Clear the tx descriptor lists */ | ||
726 | spin_lock_irqsave(&c->vc.lock, flags); | ||
727 | vchan_get_all_descriptors(&c->vc, &head); | ||
728 | |||
729 | p = c->phy; | ||
730 | if (p) { | ||
731 | dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); | ||
732 | /* vchan is assigned to a pchan - stop the channel */ | ||
733 | writel(DCSR_RUN | DCSR_IE | | ||
734 | DCSR_STRTA | DCSR_DONEA | | ||
735 | DCSR_STRTB | DCSR_DONEB, | ||
736 | p->base + DMA_DCSR_C); | ||
737 | |||
738 | if (p->txd_load) { | ||
739 | if (p->txd_load != p->txd_done) | ||
740 | list_add_tail(&p->txd_load->vd.node, &head); | ||
741 | p->txd_load = NULL; | ||
742 | } | ||
743 | if (p->txd_done) { | ||
744 | list_add_tail(&p->txd_done->vd.node, &head); | ||
745 | p->txd_done = NULL; | ||
746 | } | ||
747 | c->phy = NULL; | ||
748 | spin_lock(&d->lock); | ||
749 | p->vchan = NULL; | ||
750 | spin_unlock(&d->lock); | ||
751 | tasklet_schedule(&d->task); | ||
752 | } | ||
753 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
754 | vchan_dma_desc_free_list(&c->vc, &head); | ||
755 | ret = 0; | ||
756 | break; | ||
757 | |||
758 | case DMA_PAUSE: | ||
759 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); | ||
760 | spin_lock_irqsave(&c->vc.lock, flags); | ||
761 | if (c->status == DMA_IN_PROGRESS) { | ||
762 | c->status = DMA_PAUSED; | ||
763 | |||
764 | p = c->phy; | ||
765 | if (p) { | ||
766 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); | ||
767 | } else { | ||
768 | spin_lock(&d->lock); | ||
769 | list_del_init(&c->node); | ||
770 | spin_unlock(&d->lock); | ||
771 | } | ||
772 | } | ||
773 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
774 | ret = 0; | ||
775 | break; | ||
776 | |||
777 | case DMA_RESUME: | ||
778 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); | ||
779 | spin_lock_irqsave(&c->vc.lock, flags); | ||
780 | if (c->status == DMA_PAUSED) { | ||
781 | c->status = DMA_IN_PROGRESS; | ||
782 | |||
783 | p = c->phy; | ||
784 | if (p) { | ||
785 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); | ||
786 | } else if (!list_empty(&c->vc.desc_issued)) { | ||
787 | spin_lock(&d->lock); | ||
788 | list_add_tail(&c->node, &d->chan_pending); | ||
789 | spin_unlock(&d->lock); | ||
790 | } | ||
791 | } | ||
792 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
793 | ret = 0; | ||
794 | break; | ||
795 | |||
796 | default: | ||
797 | ret = -ENXIO; | ||
798 | break; | ||
799 | } | ||
800 | |||
801 | return ret; | ||
802 | } | ||
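The command switch above is normally reached through the generic dmaengine wrappers rather than being called directly; a hedged sketch of the client-side calls, assuming a channel already bound to a slave:

	/* Each helper funnels into sa11x0_dma_control() via the device_control hook. */
	dmaengine_pause(chan);          /* DMA_PAUSE: stop the pchan, keep the vchan state */
	dmaengine_resume(chan);         /* DMA_RESUME: restart or re-queue the vchan */
	dmaengine_terminate_all(chan);  /* DMA_TERMINATE_ALL: stop and free all descriptors */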
803 | |||
804 | struct sa11x0_dma_channel_desc { | ||
805 | u32 ddar; | ||
806 | const char *name; | ||
807 | }; | ||
808 | |||
809 | #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 } | ||
810 | static const struct sa11x0_dma_channel_desc chan_desc[] = { | ||
811 | CD(Ser0UDCTr, 0), | ||
812 | CD(Ser0UDCRc, DDAR_RW), | ||
813 | CD(Ser1SDLCTr, 0), | ||
814 | CD(Ser1SDLCRc, DDAR_RW), | ||
815 | CD(Ser1UARTTr, 0), | ||
816 | CD(Ser1UARTRc, DDAR_RW), | ||
817 | CD(Ser2ICPTr, 0), | ||
818 | CD(Ser2ICPRc, DDAR_RW), | ||
819 | CD(Ser3UARTTr, 0), | ||
820 | CD(Ser3UARTRc, DDAR_RW), | ||
821 | CD(Ser4MCP0Tr, 0), | ||
822 | CD(Ser4MCP0Rc, DDAR_RW), | ||
823 | CD(Ser4MCP1Tr, 0), | ||
824 | CD(Ser4MCP1Rc, DDAR_RW), | ||
825 | CD(Ser4SSPTr, 0), | ||
826 | CD(Ser4SSPRc, DDAR_RW), | ||
827 | }; | ||
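For reference, CD() only token-pastes the request name into a DDAR_* bit definition and stringizes it for the channel name; the first two entries expand to:

	{ .ddar = DDAR_Ser0UDCTr | 0,       .name = "Ser0UDCTr" },
	{ .ddar = DDAR_Ser0UDCRc | DDAR_RW, .name = "Ser0UDCRc" },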
828 | |||
829 | static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, | ||
830 | struct device *dev) | ||
831 | { | ||
832 | unsigned i; | ||
833 | |||
834 | dmadev->chancnt = ARRAY_SIZE(chan_desc); | ||
835 | INIT_LIST_HEAD(&dmadev->channels); | ||
836 | dmadev->dev = dev; | ||
837 | dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; | ||
838 | dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; | ||
839 | dmadev->device_control = sa11x0_dma_control; | ||
840 | dmadev->device_tx_status = sa11x0_dma_tx_status; | ||
841 | dmadev->device_issue_pending = sa11x0_dma_issue_pending; | ||
842 | |||
843 | for (i = 0; i < dmadev->chancnt; i++) { | ||
844 | struct sa11x0_dma_chan *c; | ||
845 | |||
846 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
847 | if (!c) { | ||
848 | dev_err(dev, "no memory for channel %u\n", i); | ||
849 | return -ENOMEM; | ||
850 | } | ||
851 | |||
852 | c->status = DMA_IN_PROGRESS; | ||
853 | c->ddar = chan_desc[i].ddar; | ||
854 | c->name = chan_desc[i].name; | ||
855 | INIT_LIST_HEAD(&c->node); | ||
856 | |||
857 | c->vc.desc_free = sa11x0_dma_free_desc; | ||
858 | vchan_init(&c->vc, dmadev); | ||
859 | } | ||
860 | |||
861 | return dma_async_device_register(dmadev); | ||
862 | } | ||
863 | |||
864 | static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr, | ||
865 | void *data) | ||
866 | { | ||
867 | int irq = platform_get_irq(pdev, nr); | ||
868 | |||
869 | if (irq <= 0) | ||
870 | return -ENXIO; | ||
871 | |||
872 | return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data); | ||
873 | } | ||
874 | |||
875 | static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr, | ||
876 | void *data) | ||
877 | { | ||
878 | int irq = platform_get_irq(pdev, nr); | ||
879 | if (irq > 0) | ||
880 | free_irq(irq, data); | ||
881 | } | ||
882 | |||
883 | static void sa11x0_dma_free_channels(struct dma_device *dmadev) | ||
884 | { | ||
885 | struct sa11x0_dma_chan *c, *cn; | ||
886 | |||
887 | list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { | ||
888 | list_del(&c->vc.chan.device_node); | ||
889 | tasklet_kill(&c->vc.task); | ||
890 | kfree(c); | ||
891 | } | ||
892 | } | ||
893 | |||
894 | static int sa11x0_dma_probe(struct platform_device *pdev) | ||
895 | { | ||
896 | struct sa11x0_dma_dev *d; | ||
897 | struct resource *res; | ||
898 | unsigned i; | ||
899 | int ret; | ||
900 | |||
901 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
902 | if (!res) | ||
903 | return -ENXIO; | ||
904 | |||
905 | d = kzalloc(sizeof(*d), GFP_KERNEL); | ||
906 | if (!d) { | ||
907 | ret = -ENOMEM; | ||
908 | goto err_alloc; | ||
909 | } | ||
910 | |||
911 | spin_lock_init(&d->lock); | ||
912 | INIT_LIST_HEAD(&d->chan_pending); | ||
913 | |||
914 | d->base = ioremap(res->start, resource_size(res)); | ||
915 | if (!d->base) { | ||
916 | ret = -ENOMEM; | ||
917 | goto err_ioremap; | ||
918 | } | ||
919 | |||
920 | tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d); | ||
921 | |||
922 | for (i = 0; i < NR_PHY_CHAN; i++) { | ||
923 | struct sa11x0_dma_phy *p = &d->phy[i]; | ||
924 | |||
925 | p->dev = d; | ||
926 | p->num = i; | ||
927 | p->base = d->base + i * DMA_SIZE; | ||
928 | writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR | | ||
929 | DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB, | ||
930 | p->base + DMA_DCSR_C); | ||
931 | writel_relaxed(0, p->base + DMA_DDAR); | ||
932 | |||
933 | ret = sa11x0_dma_request_irq(pdev, i, p); | ||
934 | if (ret) { | ||
935 | while (i) { | ||
936 | i--; | ||
937 | sa11x0_dma_free_irq(pdev, i, &d->phy[i]); | ||
938 | } | ||
939 | goto err_irq; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); | ||
944 | dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); | ||
945 | d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; | ||
946 | d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; | ||
947 | ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); | ||
948 | if (ret) { | ||
949 | dev_warn(d->slave.dev, "failed to register slave async device: %d\n", | ||
950 | ret); | ||
951 | goto err_slave_reg; | ||
952 | } | ||
953 | |||
954 | platform_set_drvdata(pdev, d); | ||
955 | return 0; | ||
956 | |||
957 | err_slave_reg: | ||
958 | sa11x0_dma_free_channels(&d->slave); | ||
959 | for (i = 0; i < NR_PHY_CHAN; i++) | ||
960 | sa11x0_dma_free_irq(pdev, i, &d->phy[i]); | ||
961 | err_irq: | ||
962 | tasklet_kill(&d->task); | ||
963 | iounmap(d->base); | ||
964 | err_ioremap: | ||
965 | kfree(d); | ||
966 | err_alloc: | ||
967 | return ret; | ||
968 | } | ||
969 | |||
970 | static int sa11x0_dma_remove(struct platform_device *pdev) | ||
971 | { | ||
972 | struct sa11x0_dma_dev *d = platform_get_drvdata(pdev); | ||
973 | unsigned pch; | ||
974 | |||
975 | dma_async_device_unregister(&d->slave); | ||
976 | |||
977 | sa11x0_dma_free_channels(&d->slave); | ||
978 | for (pch = 0; pch < NR_PHY_CHAN; pch++) | ||
979 | sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]); | ||
980 | tasklet_kill(&d->task); | ||
981 | iounmap(d->base); | ||
982 | kfree(d); | ||
983 | |||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | #ifdef CONFIG_PM_SLEEP | ||
988 | static int sa11x0_dma_suspend(struct device *dev) | ||
989 | { | ||
990 | struct sa11x0_dma_dev *d = dev_get_drvdata(dev); | ||
991 | unsigned pch; | ||
992 | |||
993 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { | ||
994 | struct sa11x0_dma_phy *p = &d->phy[pch]; | ||
995 | u32 dcsr, saved_dcsr; | ||
996 | |||
997 | dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R); | ||
998 | if (dcsr & DCSR_RUN) { | ||
999 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); | ||
1000 | dcsr = readl_relaxed(p->base + DMA_DCSR_R); | ||
1001 | } | ||
1002 | |||
1003 | saved_dcsr &= DCSR_RUN | DCSR_IE; | ||
1004 | if (dcsr & DCSR_BIU) { | ||
1005 | p->dbs[0] = readl_relaxed(p->base + DMA_DBSB); | ||
1006 | p->dbt[0] = readl_relaxed(p->base + DMA_DBTB); | ||
1007 | p->dbs[1] = readl_relaxed(p->base + DMA_DBSA); | ||
1008 | p->dbt[1] = readl_relaxed(p->base + DMA_DBTA); | ||
1009 | saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) | | ||
1010 | (dcsr & DCSR_STRTB ? DCSR_STRTA : 0); | ||
1011 | } else { | ||
1012 | p->dbs[0] = readl_relaxed(p->base + DMA_DBSA); | ||
1013 | p->dbt[0] = readl_relaxed(p->base + DMA_DBTA); | ||
1014 | p->dbs[1] = readl_relaxed(p->base + DMA_DBSB); | ||
1015 | p->dbt[1] = readl_relaxed(p->base + DMA_DBTB); | ||
1016 | saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB); | ||
1017 | } | ||
1018 | p->dcsr = saved_dcsr; | ||
1019 | |||
1020 | writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C); | ||
1021 | } | ||
1022 | |||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | static int sa11x0_dma_resume(struct device *dev) | ||
1027 | { | ||
1028 | struct sa11x0_dma_dev *d = dev_get_drvdata(dev); | ||
1029 | unsigned pch; | ||
1030 | |||
1031 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { | ||
1032 | struct sa11x0_dma_phy *p = &d->phy[pch]; | ||
1033 | struct sa11x0_dma_desc *txd = NULL; | ||
1034 | u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R); | ||
1035 | |||
1036 | WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN)); | ||
1037 | |||
1038 | if (p->txd_done) | ||
1039 | txd = p->txd_done; | ||
1040 | else if (p->txd_load) | ||
1041 | txd = p->txd_load; | ||
1042 | |||
1043 | if (!txd) | ||
1044 | continue; | ||
1045 | |||
1046 | writel_relaxed(txd->ddar, p->base + DMA_DDAR); | ||
1047 | |||
1048 | writel_relaxed(p->dbs[0], p->base + DMA_DBSA); | ||
1049 | writel_relaxed(p->dbt[0], p->base + DMA_DBTA); | ||
1050 | writel_relaxed(p->dbs[1], p->base + DMA_DBSB); | ||
1051 | writel_relaxed(p->dbt[1], p->base + DMA_DBTB); | ||
1052 | writel_relaxed(p->dcsr, p->base + DMA_DCSR_S); | ||
1053 | } | ||
1054 | |||
1055 | return 0; | ||
1056 | } | ||
1057 | #endif | ||
1058 | |||
1059 | static const struct dev_pm_ops sa11x0_dma_pm_ops = { | ||
1060 | .suspend_noirq = sa11x0_dma_suspend, | ||
1061 | .resume_noirq = sa11x0_dma_resume, | ||
1062 | .freeze_noirq = sa11x0_dma_suspend, | ||
1063 | .thaw_noirq = sa11x0_dma_resume, | ||
1064 | .poweroff_noirq = sa11x0_dma_suspend, | ||
1065 | .restore_noirq = sa11x0_dma_resume, | ||
1066 | }; | ||
1067 | |||
1068 | static struct platform_driver sa11x0_dma_driver = { | ||
1069 | .driver = { | ||
1070 | .name = "sa11x0-dma", | ||
1071 | .owner = THIS_MODULE, | ||
1072 | .pm = &sa11x0_dma_pm_ops, | ||
1073 | }, | ||
1074 | .probe = sa11x0_dma_probe, | ||
1075 | .remove = sa11x0_dma_remove, | ||
1076 | }; | ||
1077 | |||
1078 | bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) | ||
1079 | { | ||
1080 | if (chan->device->dev->driver == &sa11x0_dma_driver.driver) { | ||
1081 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
1082 | const char *p = param; | ||
1083 | |||
1084 | return !strcmp(c->name, p); | ||
1085 | } | ||
1086 | return false; | ||
1087 | } | ||
1088 | EXPORT_SYMBOL(sa11x0_dma_filter_fn); | ||
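A hedged sketch of how a peripheral driver would use this filter; the channel name string must match one of the chan_desc[] entries above ("Ser4SSPTr" is used purely as an example):

	/* Request the Ser4SSP transmit channel by name (needs <linux/dmaengine.h>). */
	static struct dma_chan *request_ssp_tx_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, sa11x0_dma_filter_fn,
					   (void *)"Ser4SSPTr");
	}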
1089 | |||
1090 | static int __init sa11x0_dma_init(void) | ||
1091 | { | ||
1092 | return platform_driver_register(&sa11x0_dma_driver); | ||
1093 | } | ||
1094 | subsys_initcall(sa11x0_dma_init); | ||
1095 | |||
1096 | static void __exit sa11x0_dma_exit(void) | ||
1097 | { | ||
1098 | platform_driver_unregister(&sa11x0_dma_driver); | ||
1099 | } | ||
1100 | module_exit(sa11x0_dma_exit); | ||
1101 | |||
1102 | MODULE_AUTHOR("Russell King"); | ||
1103 | MODULE_DESCRIPTION("SA-11x0 DMA driver"); | ||
1104 | MODULE_LICENSE("GPL v2"); | ||
1105 | MODULE_ALIAS("platform:sa11x0-dma"); | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile deleted file mode 100644 index 54ae9572b0a..00000000000 --- a/drivers/dma/sh/Makefile +++ /dev/null | |||
@@ -1,2 +0,0 @@ | |||
1 | obj-$(CONFIG_SH_DMAE) += shdma-base.o | ||
2 | obj-$(CONFIG_SH_DMAE) += shdma.o | ||
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c deleted file mode 100644 index f4cd946d259..00000000000 --- a/drivers/dma/sh/shdma-base.c +++ /dev/null | |||
@@ -1,943 +0,0 @@ | |||
1 | /* | ||
2 | * Dmaengine driver base library for DMA controllers, found on SH-based SoCs | ||
3 | * | ||
4 | * extracted from shdma.c | ||
5 | * | ||
6 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
7 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
8 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
9 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
10 | * | ||
11 | * This is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of version 2 of the GNU General Public License as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/delay.h> | ||
17 | #include <linux/shdma-base.h> | ||
18 | #include <linux/dmaengine.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/pm_runtime.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | |||
26 | #include "../dmaengine.h" | ||
27 | |||
28 | /* DMA descriptor control */ | ||
29 | enum shdma_desc_status { | ||
30 | DESC_IDLE, | ||
31 | DESC_PREPARED, | ||
32 | DESC_SUBMITTED, | ||
33 | DESC_COMPLETED, /* completed, have to call callback */ | ||
34 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | ||
35 | }; | ||
36 | |||
37 | #define NR_DESCS_PER_CHANNEL 32 | ||
38 | |||
39 | #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) | ||
40 | #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) | ||
41 | |||
42 | /* | ||
43 | * For slave DMA we assume that there is a finite number of DMA slaves in the | ||
44 | * system, and that each such slave can only use a finite number of channels. | ||
45 | * We use slave channel IDs to make sure that no such slave channel ID is | ||
46 | * allocated more than once. | ||
47 | */ | ||
48 | static unsigned int slave_num = 256; | ||
49 | module_param(slave_num, uint, 0444); | ||
50 | |||
51 | /* A bitmask with slave_num bits */ | ||
52 | static unsigned long *shdma_slave_used; | ||
53 | |||
54 | /* Called under spin_lock_irq(&schan->chan_lock) */ | ||
55 | static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) | ||
56 | { | ||
57 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
58 | const struct shdma_ops *ops = sdev->ops; | ||
59 | struct shdma_desc *sdesc; | ||
60 | |||
61 | /* DMA work check */ | ||
62 | if (ops->channel_busy(schan)) | ||
63 | return; | ||
64 | |||
65 | /* Find the first not yet transferred descriptor */ | ||
66 | list_for_each_entry(sdesc, &schan->ld_queue, node) | ||
67 | if (sdesc->mark == DESC_SUBMITTED) { | ||
68 | ops->start_xfer(schan, sdesc); | ||
69 | break; | ||
70 | } | ||
71 | } | ||
72 | |||
73 | static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
74 | { | ||
75 | struct shdma_desc *chunk, *c, *desc = | ||
76 | container_of(tx, struct shdma_desc, async_tx), | ||
77 | *last = desc; | ||
78 | struct shdma_chan *schan = to_shdma_chan(tx->chan); | ||
79 | dma_async_tx_callback callback = tx->callback; | ||
80 | dma_cookie_t cookie; | ||
81 | bool power_up; | ||
82 | |||
83 | spin_lock_irq(&schan->chan_lock); | ||
84 | |||
85 | power_up = list_empty(&schan->ld_queue); | ||
86 | |||
87 | cookie = dma_cookie_assign(tx); | ||
88 | |||
89 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | ||
90 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | ||
91 | /* | ||
92 | * All chunks are on the global ld_free, so we have to find | ||
93 | * the end of the chain ourselves | ||
94 | */ | ||
95 | if (chunk != desc && (chunk->mark == DESC_IDLE || | ||
96 | chunk->async_tx.cookie > 0 || | ||
97 | chunk->async_tx.cookie == -EBUSY || | ||
98 | &chunk->node == &schan->ld_free)) | ||
99 | break; | ||
100 | chunk->mark = DESC_SUBMITTED; | ||
101 | /* Callback goes to the last chunk */ | ||
102 | chunk->async_tx.callback = NULL; | ||
103 | chunk->cookie = cookie; | ||
104 | list_move_tail(&chunk->node, &schan->ld_queue); | ||
105 | last = chunk; | ||
106 | |||
107 | dev_dbg(schan->dev, "submit #%d@%p on %d\n", | ||
108 | tx->cookie, &last->async_tx, schan->id); | ||
109 | } | ||
110 | |||
111 | last->async_tx.callback = callback; | ||
112 | last->async_tx.callback_param = tx->callback_param; | ||
113 | |||
114 | if (power_up) { | ||
115 | int ret; | ||
116 | schan->pm_state = SHDMA_PM_BUSY; | ||
117 | |||
118 | ret = pm_runtime_get(schan->dev); | ||
119 | |||
120 | spin_unlock_irq(&schan->chan_lock); | ||
121 | if (ret < 0) | ||
122 | dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); | ||
123 | |||
124 | pm_runtime_barrier(schan->dev); | ||
125 | |||
126 | spin_lock_irq(&schan->chan_lock); | ||
127 | |||
128 | /* Have we been reset while waiting? */ | ||
129 | if (schan->pm_state != SHDMA_PM_ESTABLISHED) { | ||
130 | struct shdma_dev *sdev = | ||
131 | to_shdma_dev(schan->dma_chan.device); | ||
132 | const struct shdma_ops *ops = sdev->ops; | ||
133 | dev_dbg(schan->dev, "Bring up channel %d\n", | ||
134 | schan->id); | ||
135 | /* | ||
136 | * TODO: .setup_xfer() might fail on some platforms. | ||
137 | * Make it return an error code then, and on error remove | ||
138 | * chunks from the queue again | ||
139 | */ | ||
140 | ops->setup_xfer(schan, schan->slave_id); | ||
141 | |||
142 | if (schan->pm_state == SHDMA_PM_PENDING) | ||
143 | shdma_chan_xfer_ld_queue(schan); | ||
144 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
145 | } | ||
146 | } else { | ||
147 | /* | ||
148 | * Tell .device_issue_pending() not to run the queue; interrupts | ||
149 | * will do it anyway | ||
150 | */ | ||
151 | schan->pm_state = SHDMA_PM_PENDING; | ||
152 | } | ||
153 | |||
154 | spin_unlock_irq(&schan->chan_lock); | ||
155 | |||
156 | return cookie; | ||
157 | } | ||
158 | |||
159 | /* Called with desc_lock held */ | ||
160 | static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) | ||
161 | { | ||
162 | struct shdma_desc *sdesc; | ||
163 | |||
164 | list_for_each_entry(sdesc, &schan->ld_free, node) | ||
165 | if (sdesc->mark != DESC_PREPARED) { | ||
166 | BUG_ON(sdesc->mark != DESC_IDLE); | ||
167 | list_del(&sdesc->node); | ||
168 | return sdesc; | ||
169 | } | ||
170 | |||
171 | return NULL; | ||
172 | } | ||
173 | |||
174 | static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | ||
175 | { | ||
176 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
177 | const struct shdma_ops *ops = sdev->ops; | ||
178 | int ret; | ||
179 | |||
180 | if (slave_id < 0 || slave_id >= slave_num) | ||
181 | return -EINVAL; | ||
182 | |||
183 | if (test_and_set_bit(slave_id, shdma_slave_used)) | ||
184 | return -EBUSY; | ||
185 | |||
186 | ret = ops->set_slave(schan, slave_id, false); | ||
187 | if (ret < 0) { | ||
188 | clear_bit(slave_id, shdma_slave_used); | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | schan->slave_id = slave_id; | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * This is the standard shdma filter function to be used as a replacement for the | ||
199 | * "old" method, using the .private pointer. If for some reason you allocate a | ||
200 | * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter | ||
201 | * parameter. If this filter is used, the slave driver, after calling | ||
202 | * dma_request_channel(), will also have to call dmaengine_slave_config() with | ||
203 | * .slave_id, .direction, and either .src_addr or .dst_addr set. | ||
204 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE | ||
205 | * capability! If this becomes a requirement, hardware glue drivers using these | ||
206 | * services would have to provide their own filters, which would first check | ||
207 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do | ||
208 | * this, and only then, in case of a match, call this common filter. | ||
209 | */ | ||
210 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) | ||
211 | { | ||
212 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
213 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
214 | const struct shdma_ops *ops = sdev->ops; | ||
215 | int slave_id = (int)arg; | ||
216 | int ret; | ||
217 | |||
218 | if (slave_id < 0) | ||
219 | /* No slave requested - arbitrary channel */ | ||
220 | return true; | ||
221 | |||
222 | if (slave_id >= slave_num) | ||
223 | return false; | ||
224 | |||
225 | ret = ops->set_slave(schan, slave_id, true); | ||
226 | if (ret < 0) | ||
227 | return false; | ||
228 | |||
229 | return true; | ||
230 | } | ||
231 | EXPORT_SYMBOL(shdma_chan_filter); | ||
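Following the comment above, a hedged sketch of the expected usage pattern: request any DMA_SLAVE channel through shdma_chan_filter() with the desired slave ID, then bind it with dmaengine_slave_config(). MY_SLAVE_ID and MY_TX_FIFO_PHYS are placeholders.

	static struct dma_chan *my_request_tx_chan(void)
	{
		struct dma_slave_config cfg = {
			.slave_id  = MY_SLAVE_ID,	/* placeholder slave ID */
			.direction = DMA_MEM_TO_DEV,
			.dst_addr  = MY_TX_FIFO_PHYS,	/* placeholder FIFO address */
		};
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		chan = dma_request_channel(mask, shdma_chan_filter,
					   (void *)MY_SLAVE_ID);
		if (!chan)
			return NULL;

		if (dmaengine_slave_config(chan, &cfg) < 0) {
			dma_release_channel(chan);
			return NULL;
		}
		return chan;
	}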
232 | |||
233 | static int shdma_alloc_chan_resources(struct dma_chan *chan) | ||
234 | { | ||
235 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
236 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
237 | const struct shdma_ops *ops = sdev->ops; | ||
238 | struct shdma_desc *desc; | ||
239 | struct shdma_slave *slave = chan->private; | ||
240 | int ret, i; | ||
241 | |||
242 | /* | ||
243 | * This relies on the guarantee from dmaengine that alloc_chan_resources | ||
244 | * never runs concurrently with itself or free_chan_resources. | ||
245 | */ | ||
246 | if (slave) { | ||
247 | /* Legacy mode: .private is set in filter */ | ||
248 | ret = shdma_setup_slave(schan, slave->slave_id); | ||
249 | if (ret < 0) | ||
250 | goto esetslave; | ||
251 | } else { | ||
252 | schan->slave_id = -EINVAL; | ||
253 | } | ||
254 | |||
255 | schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, | ||
256 | sdev->desc_size, GFP_KERNEL); | ||
257 | if (!schan->desc) { | ||
258 | ret = -ENOMEM; | ||
259 | goto edescalloc; | ||
260 | } | ||
261 | schan->desc_num = NR_DESCS_PER_CHANNEL; | ||
262 | |||
263 | for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { | ||
264 | desc = ops->embedded_desc(schan->desc, i); | ||
265 | dma_async_tx_descriptor_init(&desc->async_tx, | ||
266 | &schan->dma_chan); | ||
267 | desc->async_tx.tx_submit = shdma_tx_submit; | ||
268 | desc->mark = DESC_IDLE; | ||
269 | |||
270 | list_add(&desc->node, &schan->ld_free); | ||
271 | } | ||
272 | |||
273 | return NR_DESCS_PER_CHANNEL; | ||
274 | |||
275 | edescalloc: | ||
276 | if (slave) | ||
277 | esetslave: | ||
278 | clear_bit(slave->slave_id, shdma_slave_used); | ||
279 | chan->private = NULL; | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) | ||
284 | { | ||
285 | struct shdma_desc *desc, *_desc; | ||
286 | /* Is the "exposed" head of a chain acked? */ | ||
287 | bool head_acked = false; | ||
288 | dma_cookie_t cookie = 0; | ||
289 | dma_async_tx_callback callback = NULL; | ||
290 | void *param = NULL; | ||
291 | unsigned long flags; | ||
292 | |||
293 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
294 | list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { | ||
295 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
296 | |||
297 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | ||
298 | BUG_ON(desc->mark != DESC_SUBMITTED && | ||
299 | desc->mark != DESC_COMPLETED && | ||
300 | desc->mark != DESC_WAITING); | ||
301 | |||
302 | /* | ||
303 | * queue is ordered, and we use this loop to (1) clean up all | ||
304 | * completed descriptors, and to (2) update descriptor flags of | ||
305 | * any chunks in a (partially) completed chain | ||
306 | */ | ||
307 | if (!all && desc->mark == DESC_SUBMITTED && | ||
308 | desc->cookie != cookie) | ||
309 | break; | ||
310 | |||
311 | if (tx->cookie > 0) | ||
312 | cookie = tx->cookie; | ||
313 | |||
314 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | ||
315 | if (schan->dma_chan.completed_cookie != desc->cookie - 1) | ||
316 | dev_dbg(schan->dev, | ||
317 | "Completing cookie %d, expected %d\n", | ||
318 | desc->cookie, | ||
319 | schan->dma_chan.completed_cookie + 1); | ||
320 | schan->dma_chan.completed_cookie = desc->cookie; | ||
321 | } | ||
322 | |||
323 | /* Call callback on the last chunk */ | ||
324 | if (desc->mark == DESC_COMPLETED && tx->callback) { | ||
325 | desc->mark = DESC_WAITING; | ||
326 | callback = tx->callback; | ||
327 | param = tx->callback_param; | ||
328 | dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", | ||
329 | tx->cookie, tx, schan->id); | ||
330 | BUG_ON(desc->chunks != 1); | ||
331 | break; | ||
332 | } | ||
333 | |||
334 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { | ||
335 | if (desc->mark == DESC_COMPLETED) { | ||
336 | BUG_ON(tx->cookie < 0); | ||
337 | desc->mark = DESC_WAITING; | ||
338 | } | ||
339 | head_acked = async_tx_test_ack(tx); | ||
340 | } else { | ||
341 | switch (desc->mark) { | ||
342 | case DESC_COMPLETED: | ||
343 | desc->mark = DESC_WAITING; | ||
344 | /* Fall through */ | ||
345 | case DESC_WAITING: | ||
346 | if (head_acked) | ||
347 | async_tx_ack(&desc->async_tx); | ||
348 | } | ||
349 | } | ||
350 | |||
351 | dev_dbg(schan->dev, "descriptor %p #%d completed.\n", | ||
352 | tx, tx->cookie); | ||
353 | |||
354 | if (((desc->mark == DESC_COMPLETED || | ||
355 | desc->mark == DESC_WAITING) && | ||
356 | async_tx_test_ack(&desc->async_tx)) || all) { | ||
357 | /* Remove from ld_queue list */ | ||
358 | desc->mark = DESC_IDLE; | ||
359 | |||
360 | list_move(&desc->node, &schan->ld_free); | ||
361 | |||
362 | if (list_empty(&schan->ld_queue)) { | ||
363 | dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); | ||
364 | pm_runtime_put(schan->dev); | ||
365 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
366 | } | ||
367 | } | ||
368 | } | ||
369 | |||
370 | if (all && !callback) | ||
371 | /* | ||
372 | * Terminating and the loop completed normally: forgive | ||
373 | * uncompleted cookies | ||
374 | */ | ||
375 | schan->dma_chan.completed_cookie = schan->dma_chan.cookie; | ||
376 | |||
377 | spin_unlock_irqrestore(&schan->chan_lock, flags); | ||
378 | |||
379 | if (callback) | ||
380 | callback(param); | ||
381 | |||
382 | return callback; | ||
383 | } | ||
384 | |||
385 | /* | ||
386 | * shdma_chan_ld_cleanup - Clean up link descriptors | ||
387 | * | ||
388 | * Clean up the ld_queue of the DMA channel. | ||
389 | */ | ||
390 | static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) | ||
391 | { | ||
392 | while (__ld_cleanup(schan, all)) | ||
393 | ; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * shdma_free_chan_resources - Free all resources of the channel. | ||
398 | */ | ||
399 | static void shdma_free_chan_resources(struct dma_chan *chan) | ||
400 | { | ||
401 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
402 | struct shdma_dev *sdev = to_shdma_dev(chan->device); | ||
403 | const struct shdma_ops *ops = sdev->ops; | ||
404 | LIST_HEAD(list); | ||
405 | |||
406 | /* Protect against ISR */ | ||
407 | spin_lock_irq(&schan->chan_lock); | ||
408 | ops->halt_channel(schan); | ||
409 | spin_unlock_irq(&schan->chan_lock); | ||
410 | |||
411 | /* Now no new interrupts will occur */ | ||
412 | |||
413 | /* Prepared and not submitted descriptors can still be on the queue */ | ||
414 | if (!list_empty(&schan->ld_queue)) | ||
415 | shdma_chan_ld_cleanup(schan, true); | ||
416 | |||
417 | if (schan->slave_id >= 0) { | ||
418 | /* The caller is holding dma_list_mutex */ | ||
419 | clear_bit(schan->slave_id, shdma_slave_used); | ||
420 | chan->private = NULL; | ||
421 | } | ||
422 | |||
423 | spin_lock_irq(&schan->chan_lock); | ||
424 | |||
425 | list_splice_init(&schan->ld_free, &list); | ||
426 | schan->desc_num = 0; | ||
427 | |||
428 | spin_unlock_irq(&schan->chan_lock); | ||
429 | |||
430 | kfree(schan->desc); | ||
431 | } | ||
432 | |||
433 | /** | ||
434 | * shdma_add_desc - get, set up and return one transfer descriptor | ||
435 | * @schan: DMA channel | ||
436 | * @flags: DMA transfer flags | ||
437 | * @dst: destination DMA address, incremented when direction equals | ||
438 | * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM | ||
439 | * @src: source DMA address, incremented when direction equals | ||
440 | * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM | ||
441 | * @len: DMA transfer length | ||
442 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
443 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
444 | * equals DMA_MEM_TO_MEM for MEMCPY | ||
445 | * Returns the new descriptor or NULL on error | ||
446 | * Locks: called with desc_lock held | ||
447 | */ | ||
448 | static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, | ||
449 | unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, | ||
450 | struct shdma_desc **first, enum dma_transfer_direction direction) | ||
451 | { | ||
452 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
453 | const struct shdma_ops *ops = sdev->ops; | ||
454 | struct shdma_desc *new; | ||
455 | size_t copy_size = *len; | ||
456 | |||
457 | if (!copy_size) | ||
458 | return NULL; | ||
459 | |||
460 | /* Allocate the link descriptor from the free list */ | ||
461 | new = shdma_get_desc(schan); | ||
462 | if (!new) { | ||
463 | dev_err(schan->dev, "No free link descriptor available\n"); | ||
464 | return NULL; | ||
465 | } | ||
466 | |||
467 | ops->desc_setup(schan, new, *src, *dst, ©_size); | ||
468 | |||
469 | if (!*first) { | ||
470 | /* First desc */ | ||
471 | new->async_tx.cookie = -EBUSY; | ||
472 | *first = new; | ||
473 | } else { | ||
474 | /* Other desc - invisible to the user */ | ||
475 | new->async_tx.cookie = -EINVAL; | ||
476 | } | ||
477 | |||
478 | dev_dbg(schan->dev, | ||
479 | "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", | ||
480 | copy_size, *len, *src, *dst, &new->async_tx, | ||
481 | new->async_tx.cookie); | ||
482 | |||
483 | new->mark = DESC_PREPARED; | ||
484 | new->async_tx.flags = flags; | ||
485 | new->direction = direction; | ||
486 | new->partial = 0; | ||
487 | |||
488 | *len -= copy_size; | ||
489 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) | ||
490 | *src += copy_size; | ||
491 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) | ||
492 | *dst += copy_size; | ||
493 | |||
494 | return new; | ||
495 | } | ||
496 | |||
497 | /* | ||
498 | * shdma_prep_sg - prepare transfer descriptors from an SG list | ||
499 | * | ||
500 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
501 | * converted to scatter-gather to guarantee consistent locking and a correct | ||
502 | * list manipulation. For slave DMA, direction carries the usual meaning and, | ||
503 | * logically, the SG list is RAM while the addr variable contains the slave | ||
504 | * address, e.g. the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM | ||
505 | * and the SG list contains only one element and points at the source buffer. | ||
506 | */ | ||
507 | static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, | ||
508 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
509 | enum dma_transfer_direction direction, unsigned long flags) | ||
510 | { | ||
511 | struct scatterlist *sg; | ||
512 | struct shdma_desc *first = NULL, *new = NULL /* compiler... */; | ||
513 | LIST_HEAD(tx_list); | ||
514 | int chunks = 0; | ||
515 | unsigned long irq_flags; | ||
516 | int i; | ||
517 | |||
518 | for_each_sg(sgl, sg, sg_len, i) | ||
519 | chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); | ||
520 | |||
521 | /* Have to lock the whole loop to protect against concurrent release */ | ||
522 | spin_lock_irqsave(&schan->chan_lock, irq_flags); | ||
523 | |||
524 | /* | ||
525 | * Chaining: | ||
526 | * the first descriptor is what the user deals with in all API calls; its | ||
527 | * cookie is initially set to -EBUSY and changed to a positive number at | ||
528 | * tx-submit | ||
529 | * if more than one chunk is needed, further chunks have cookie = -EINVAL | ||
530 | * the last chunk, if not equal to the first, has cookie = -ENOSPC | ||
531 | * all chunks are linked onto the tx_list head with their .node heads only | ||
532 | * during this function, then they are immediately spliced back onto the | ||
533 | * free list in the form of a chain | ||
534 | */ | ||
535 | for_each_sg(sgl, sg, sg_len, i) { | ||
536 | dma_addr_t sg_addr = sg_dma_address(sg); | ||
537 | size_t len = sg_dma_len(sg); | ||
538 | |||
539 | if (!len) | ||
540 | goto err_get_desc; | ||
541 | |||
542 | do { | ||
543 | dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n", | ||
544 | i, sg, len, (unsigned long long)sg_addr); | ||
545 | |||
546 | if (direction == DMA_DEV_TO_MEM) | ||
547 | new = shdma_add_desc(schan, flags, | ||
548 | &sg_addr, addr, &len, &first, | ||
549 | direction); | ||
550 | else | ||
551 | new = shdma_add_desc(schan, flags, | ||
552 | addr, &sg_addr, &len, &first, | ||
553 | direction); | ||
554 | if (!new) | ||
555 | goto err_get_desc; | ||
556 | |||
557 | new->chunks = chunks--; | ||
558 | list_add_tail(&new->node, &tx_list); | ||
559 | } while (len); | ||
560 | } | ||
561 | |||
562 | if (new != first) | ||
563 | new->async_tx.cookie = -ENOSPC; | ||
564 | |||
565 | /* Put them back on the free list, so they don't get lost */ | ||
566 | list_splice_tail(&tx_list, &schan->ld_free); | ||
567 | |||
568 | spin_unlock_irqrestore(&schan->chan_lock, irq_flags); | ||
569 | |||
570 | return &first->async_tx; | ||
571 | |||
572 | err_get_desc: | ||
573 | list_for_each_entry(new, &tx_list, node) | ||
574 | new->mark = DESC_IDLE; | ||
575 | list_splice(&tx_list, &schan->ld_free); | ||
576 | |||
577 | spin_unlock_irqrestore(&schan->chan_lock, irq_flags); | ||
578 | |||
579 | return NULL; | ||
580 | } | ||
581 | |||
582 | static struct dma_async_tx_descriptor *shdma_prep_memcpy( | ||
583 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
584 | size_t len, unsigned long flags) | ||
585 | { | ||
586 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
587 | struct scatterlist sg; | ||
588 | |||
589 | if (!chan || !len) | ||
590 | return NULL; | ||
591 | |||
592 | BUG_ON(!schan->desc_num); | ||
593 | |||
594 | sg_init_table(&sg, 1); | ||
595 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
596 | offset_in_page(dma_src)); | ||
597 | sg_dma_address(&sg) = dma_src; | ||
598 | sg_dma_len(&sg) = len; | ||
599 | |||
600 | return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); | ||
601 | } | ||
602 | |||
603 | static struct dma_async_tx_descriptor *shdma_prep_slave_sg( | ||
604 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
605 | enum dma_transfer_direction direction, unsigned long flags, void *context) | ||
606 | { | ||
607 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
608 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
609 | const struct shdma_ops *ops = sdev->ops; | ||
610 | int slave_id = schan->slave_id; | ||
611 | dma_addr_t slave_addr; | ||
612 | |||
613 | if (!chan) | ||
614 | return NULL; | ||
615 | |||
616 | BUG_ON(!schan->desc_num); | ||
617 | |||
618 | /* Someone calling slave DMA on a generic channel? */ | ||
619 | if (slave_id < 0 || !sg_len) { | ||
620 | dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", | ||
621 | __func__, sg_len, slave_id); | ||
622 | return NULL; | ||
623 | } | ||
624 | |||
625 | slave_addr = ops->slave_addr(schan); | ||
626 | |||
627 | return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, | ||
628 | direction, flags); | ||
629 | } | ||
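A hedged client-side sketch of driving the slave prep path above: prepare a descriptor over a mapped sg_table, attach a completion callback, submit, and kick the queue. my_done() and the sg_table are assumed to be supplied by the peripheral driver.

	static int my_start_tx(struct dma_chan *chan, struct sg_table *sgt,
			       void (*my_done)(void *), void *ctx)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -ENOMEM;

		desc->callback = my_done;	/* invoked from the cleanup path */
		desc->callback_param = ctx;
		dmaengine_submit(desc);		/* lands in shdma_tx_submit() */
		dma_async_issue_pending(chan);	/* lands in shdma_issue_pending() */

		return 0;
	}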
630 | |||
631 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
632 | unsigned long arg) | ||
633 | { | ||
634 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
635 | struct shdma_dev *sdev = to_shdma_dev(chan->device); | ||
636 | const struct shdma_ops *ops = sdev->ops; | ||
637 | struct dma_slave_config *config; | ||
638 | unsigned long flags; | ||
639 | int ret; | ||
640 | |||
641 | if (!chan) | ||
642 | return -EINVAL; | ||
643 | |||
644 | switch (cmd) { | ||
645 | case DMA_TERMINATE_ALL: | ||
646 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
647 | ops->halt_channel(schan); | ||
648 | |||
649 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { | ||
650 | /* Record partial transfer */ | ||
651 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, | ||
652 | struct shdma_desc, node); | ||
653 | desc->partial = ops->get_partial(schan, desc); | ||
654 | } | ||
655 | |||
656 | spin_unlock_irqrestore(&schan->chan_lock, flags); | ||
657 | |||
658 | shdma_chan_ld_cleanup(schan, true); | ||
659 | break; | ||
660 | case DMA_SLAVE_CONFIG: | ||
661 | /* | ||
662 | * So far only .slave_id is used, but the slave drivers are | ||
663 | * encouraged to also set a transfer direction and an address. | ||
664 | */ | ||
665 | if (!arg) | ||
666 | return -EINVAL; | ||
667 | /* | ||
668 | * We could lock this, but you shouldn't be configuring the | ||
669 | * channel while using it... | ||
670 | */ | ||
671 | config = (struct dma_slave_config *)arg; | ||
672 | ret = shdma_setup_slave(schan, config->slave_id); | ||
673 | if (ret < 0) | ||
674 | return ret; | ||
675 | break; | ||
676 | default: | ||
677 | return -ENXIO; | ||
678 | } | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | static void shdma_issue_pending(struct dma_chan *chan) | ||
684 | { | ||
685 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
686 | |||
687 | spin_lock_irq(&schan->chan_lock); | ||
688 | if (schan->pm_state == SHDMA_PM_ESTABLISHED) | ||
689 | shdma_chan_xfer_ld_queue(schan); | ||
690 | else | ||
691 | schan->pm_state = SHDMA_PM_PENDING; | ||
692 | spin_unlock_irq(&schan->chan_lock); | ||
693 | } | ||
694 | |||
695 | static enum dma_status shdma_tx_status(struct dma_chan *chan, | ||
696 | dma_cookie_t cookie, | ||
697 | struct dma_tx_state *txstate) | ||
698 | { | ||
699 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
700 | enum dma_status status; | ||
701 | unsigned long flags; | ||
702 | |||
703 | shdma_chan_ld_cleanup(schan, false); | ||
704 | |||
705 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
706 | |||
707 | status = dma_cookie_status(chan, cookie, txstate); | ||
708 | |||
709 | /* | ||
710 | * If we don't find the cookie on the queue, it has been aborted and we | ||
711 | * have to report an error | ||
712 | */ | ||
713 | if (status != DMA_SUCCESS) { | ||
714 | struct shdma_desc *sdesc; | ||
715 | status = DMA_ERROR; | ||
716 | list_for_each_entry(sdesc, &schan->ld_queue, node) | ||
717 | if (sdesc->cookie == cookie) { | ||
718 | status = DMA_IN_PROGRESS; | ||
719 | break; | ||
720 | } | ||
721 | } | ||
722 | |||
723 | spin_unlock_irqrestore(&schan->chan_lock, flags); | ||
724 | |||
725 | return status; | ||
726 | } | ||
727 | |||
728 | /* Called from error IRQ or NMI */ | ||
729 | bool shdma_reset(struct shdma_dev *sdev) | ||
730 | { | ||
731 | const struct shdma_ops *ops = sdev->ops; | ||
732 | struct shdma_chan *schan; | ||
733 | unsigned int handled = 0; | ||
734 | int i; | ||
735 | |||
736 | /* Reset all channels */ | ||
737 | shdma_for_each_chan(schan, sdev, i) { | ||
738 | struct shdma_desc *sdesc; | ||
739 | LIST_HEAD(dl); | ||
740 | |||
741 | if (!schan) | ||
742 | continue; | ||
743 | |||
744 | spin_lock(&schan->chan_lock); | ||
745 | |||
746 | /* Stop the channel */ | ||
747 | ops->halt_channel(schan); | ||
748 | |||
749 | list_splice_init(&schan->ld_queue, &dl); | ||
750 | |||
751 | if (!list_empty(&dl)) { | ||
752 | dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); | ||
753 | pm_runtime_put(schan->dev); | ||
754 | } | ||
755 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
756 | |||
757 | spin_unlock(&schan->chan_lock); | ||
758 | |||
759 | /* Complete all */ | ||
760 | list_for_each_entry(sdesc, &dl, node) { | ||
761 | struct dma_async_tx_descriptor *tx = &sdesc->async_tx; | ||
762 | sdesc->mark = DESC_IDLE; | ||
763 | if (tx->callback) | ||
764 | tx->callback(tx->callback_param); | ||
765 | } | ||
766 | |||
767 | spin_lock(&schan->chan_lock); | ||
768 | list_splice(&dl, &schan->ld_free); | ||
769 | spin_unlock(&schan->chan_lock); | ||
770 | |||
771 | handled++; | ||
772 | } | ||
773 | |||
774 | return !!handled; | ||
775 | } | ||
776 | EXPORT_SYMBOL(shdma_reset); | ||
777 | |||
778 | static irqreturn_t chan_irq(int irq, void *dev) | ||
779 | { | ||
780 | struct shdma_chan *schan = dev; | ||
781 | const struct shdma_ops *ops = | ||
782 | to_shdma_dev(schan->dma_chan.device)->ops; | ||
783 | irqreturn_t ret; | ||
784 | |||
785 | spin_lock(&schan->chan_lock); | ||
786 | |||
787 | ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; | ||
788 | |||
789 | spin_unlock(&schan->chan_lock); | ||
790 | |||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | static irqreturn_t chan_irqt(int irq, void *dev) | ||
795 | { | ||
796 | struct shdma_chan *schan = dev; | ||
797 | const struct shdma_ops *ops = | ||
798 | to_shdma_dev(schan->dma_chan.device)->ops; | ||
799 | struct shdma_desc *sdesc; | ||
800 | |||
801 | spin_lock_irq(&schan->chan_lock); | ||
802 | list_for_each_entry(sdesc, &schan->ld_queue, node) { | ||
803 | if (sdesc->mark == DESC_SUBMITTED && | ||
804 | ops->desc_completed(schan, sdesc)) { | ||
805 | dev_dbg(schan->dev, "done #%d@%p\n", | ||
806 | sdesc->async_tx.cookie, &sdesc->async_tx); | ||
807 | sdesc->mark = DESC_COMPLETED; | ||
808 | break; | ||
809 | } | ||
810 | } | ||
811 | /* Next desc */ | ||
812 | shdma_chan_xfer_ld_queue(schan); | ||
813 | spin_unlock_irq(&schan->chan_lock); | ||
814 | |||
815 | shdma_chan_ld_cleanup(schan, false); | ||
816 | |||
817 | return IRQ_HANDLED; | ||
818 | } | ||
819 | |||
820 | int shdma_request_irq(struct shdma_chan *schan, int irq, | ||
821 | unsigned long flags, const char *name) | ||
822 | { | ||
823 | int ret = request_threaded_irq(irq, chan_irq, chan_irqt, | ||
824 | flags, name, schan); | ||
825 | |||
826 | schan->irq = ret < 0 ? ret : irq; | ||
827 | |||
828 | return ret; | ||
829 | } | ||
830 | EXPORT_SYMBOL(shdma_request_irq); | ||
831 | |||
832 | void shdma_free_irq(struct shdma_chan *schan) | ||
833 | { | ||
834 | if (schan->irq >= 0) | ||
835 | free_irq(schan->irq, schan); | ||
836 | } | ||
837 | EXPORT_SYMBOL(shdma_free_irq); | ||
838 | |||
839 | void shdma_chan_probe(struct shdma_dev *sdev, | ||
840 | struct shdma_chan *schan, int id) | ||
841 | { | ||
842 | schan->pm_state = SHDMA_PM_ESTABLISHED; | ||
843 | |||
844 | /* reference struct dma_device */ | ||
845 | schan->dma_chan.device = &sdev->dma_dev; | ||
846 | dma_cookie_init(&schan->dma_chan); | ||
847 | |||
848 | schan->dev = sdev->dma_dev.dev; | ||
849 | schan->id = id; | ||
850 | |||
851 | if (!schan->max_xfer_len) | ||
852 | schan->max_xfer_len = PAGE_SIZE; | ||
853 | |||
854 | spin_lock_init(&schan->chan_lock); | ||
855 | |||
856 | /* Init descriptor management lists */ | ||
857 | INIT_LIST_HEAD(&schan->ld_queue); | ||
858 | INIT_LIST_HEAD(&schan->ld_free); | ||
859 | |||
860 | /* Add the channel to DMA device channel list */ | ||
861 | list_add_tail(&schan->dma_chan.device_node, | ||
862 | &sdev->dma_dev.channels); | ||
863 | sdev->schan[sdev->dma_dev.chancnt++] = schan; | ||
864 | } | ||
865 | EXPORT_SYMBOL(shdma_chan_probe); | ||
866 | |||
867 | void shdma_chan_remove(struct shdma_chan *schan) | ||
868 | { | ||
869 | list_del(&schan->dma_chan.device_node); | ||
870 | } | ||
871 | EXPORT_SYMBOL(shdma_chan_remove); | ||
872 | |||
873 | int shdma_init(struct device *dev, struct shdma_dev *sdev, | ||
874 | int chan_num) | ||
875 | { | ||
876 | struct dma_device *dma_dev = &sdev->dma_dev; | ||
877 | |||
878 | /* | ||
879 | * Require all call-backs for now; they can trivially be made optional | ||
880 | * later as required | ||
881 | */ | ||
882 | if (!sdev->ops || | ||
883 | !sdev->desc_size || | ||
884 | !sdev->ops->embedded_desc || | ||
885 | !sdev->ops->start_xfer || | ||
886 | !sdev->ops->setup_xfer || | ||
887 | !sdev->ops->set_slave || | ||
888 | !sdev->ops->desc_setup || | ||
889 | !sdev->ops->slave_addr || | ||
890 | !sdev->ops->channel_busy || | ||
891 | !sdev->ops->halt_channel || | ||
892 | !sdev->ops->desc_completed) | ||
893 | return -EINVAL; | ||
894 | |||
895 | sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); | ||
896 | if (!sdev->schan) | ||
897 | return -ENOMEM; | ||
898 | |||
899 | INIT_LIST_HEAD(&dma_dev->channels); | ||
900 | |||
901 | /* Common and MEMCPY operations */ | ||
902 | dma_dev->device_alloc_chan_resources | ||
903 | = shdma_alloc_chan_resources; | ||
904 | dma_dev->device_free_chan_resources = shdma_free_chan_resources; | ||
905 | dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; | ||
906 | dma_dev->device_tx_status = shdma_tx_status; | ||
907 | dma_dev->device_issue_pending = shdma_issue_pending; | ||
908 | |||
909 | /* Fields compulsory for DMA_SLAVE */ | ||
910 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; | ||
911 | dma_dev->device_control = shdma_control; | ||
912 | |||
913 | dma_dev->dev = dev; | ||
914 | |||
915 | return 0; | ||
916 | } | ||
917 | EXPORT_SYMBOL(shdma_init); | ||
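Taken together with shdma_chan_probe() and shdma_request_irq() above, a glue driver's probe path roughly follows the outline below. This is a hedged sketch only; every my_*/MY_* name is a placeholder and error unwinding is elided.

	static int my_dmae_probe(struct platform_device *pdev)
	{
		struct my_dmae *md;			/* placeholder glue-driver state */
		int i, ret;

		md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
		if (!md)
			return -ENOMEM;

		md->shdma_dev.ops = &my_shdma_ops;	/* must supply every callback checked above */
		md->shdma_dev.desc_size = sizeof(struct my_desc);

		ret = shdma_init(&pdev->dev, &md->shdma_dev, MY_NR_CHANNELS);
		if (ret < 0)
			return ret;

		dma_cap_set(DMA_SLAVE, md->shdma_dev.dma_dev.cap_mask);
		dma_cap_set(DMA_MEMCPY, md->shdma_dev.dma_dev.cap_mask);

		for (i = 0; i < MY_NR_CHANNELS; i++) {
			shdma_chan_probe(&md->shdma_dev, &md->chan[i].shdma_chan, i);
			shdma_request_irq(&md->chan[i].shdma_chan, my_irq[i],
					  IRQF_SHARED, dev_name(&pdev->dev));
		}

		return dma_async_device_register(&md->shdma_dev.dma_dev);
	}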
918 | |||
919 | void shdma_cleanup(struct shdma_dev *sdev) | ||
920 | { | ||
921 | kfree(sdev->schan); | ||
922 | } | ||
923 | EXPORT_SYMBOL(shdma_cleanup); | ||
924 | |||
925 | static int __init shdma_enter(void) | ||
926 | { | ||
927 | shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) * | ||
928 | sizeof(long), GFP_KERNEL); | ||
929 | if (!shdma_slave_used) | ||
930 | return -ENOMEM; | ||
931 | return 0; | ||
932 | } | ||
933 | module_init(shdma_enter); | ||
934 | |||
935 | static void __exit shdma_exit(void) | ||
936 | { | ||
937 | kfree(shdma_slave_used); | ||
938 | } | ||
939 | module_exit(shdma_exit); | ||
940 | |||
941 | MODULE_LICENSE("GPL v2"); | ||
942 | MODULE_DESCRIPTION("SH-DMA driver base library"); | ||
943 | MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); | ||
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c deleted file mode 100644 index 3315e4be9b8..00000000000 --- a/drivers/dma/sh/shdma.c +++ /dev/null | |||
@@ -1,955 +0,0 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * based on drivers/dma/fsldma.c | ||
5 | * | ||
6 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
7 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
8 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
9 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
10 | * | ||
11 | * This is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * - The SuperH DMA controller does not have a hardware DMA chain mode. | ||
17 | * - The maximum DMA size is 16MB. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/dmaengine.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/sh_dma.h> | ||
30 | #include <linux/notifier.h> | ||
31 | #include <linux/kdebug.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/rculist.h> | ||
34 | |||
35 | #include "../dmaengine.h" | ||
36 | #include "shdma.h" | ||
37 | |||
38 | #define SH_DMAE_DRV_NAME "sh-dma-engine" | ||
39 | |||
40 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | ||
41 | #define LOG2_DEFAULT_XFER_SIZE 2 | ||
42 | #define SH_DMA_SLAVE_NUMBER 256 | ||
43 | #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) | ||
44 | |||
45 | /* | ||
46 | * Used for write-side mutual exclusion for the global device list, | ||
47 | * read-side synchronization by way of RCU, and per-controller data. | ||
48 | */ | ||
49 | static DEFINE_SPINLOCK(sh_dmae_lock); | ||
50 | static LIST_HEAD(sh_dmae_devices); | ||
51 | |||
52 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
53 | { | ||
54 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
55 | |||
56 | __raw_writel(data, shdev->chan_reg + | ||
57 | shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset); | ||
58 | } | ||
59 | |||
60 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | ||
61 | { | ||
62 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); | ||
63 | } | ||
64 | |||
65 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | ||
66 | { | ||
67 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); | ||
68 | } | ||
69 | |||
70 | static u16 dmaor_read(struct sh_dmae_device *shdev) | ||
71 | { | ||
72 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | ||
73 | |||
74 | if (shdev->pdata->dmaor_is_32bit) | ||
75 | return __raw_readl(addr); | ||
76 | else | ||
77 | return __raw_readw(addr); | ||
78 | } | ||
79 | |||
80 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | ||
81 | { | ||
82 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | ||
83 | |||
84 | if (shdev->pdata->dmaor_is_32bit) | ||
85 | __raw_writel(data, addr); | ||
86 | else | ||
87 | __raw_writew(data, addr); | ||
88 | } | ||
89 | |||
90 | static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
91 | { | ||
92 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
93 | |||
94 | __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
95 | } | ||
96 | |||
97 | static u32 chcr_read(struct sh_dmae_chan *sh_dc) | ||
98 | { | ||
99 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
100 | |||
101 | return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Reset DMA controller | ||
106 | * | ||
107 | * SH7780 has two DMAOR registers | ||
108 | */ | ||
109 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) | ||
110 | { | ||
111 | unsigned short dmaor; | ||
112 | unsigned long flags; | ||
113 | |||
114 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
115 | |||
116 | dmaor = dmaor_read(shdev); | ||
117 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); | ||
118 | |||
119 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
120 | } | ||
121 | |||
122 | static int sh_dmae_rst(struct sh_dmae_device *shdev) | ||
123 | { | ||
124 | unsigned short dmaor; | ||
125 | unsigned long flags; | ||
126 | |||
127 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
128 | |||
129 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); | ||
130 | |||
131 | if (shdev->pdata->chclr_present) { | ||
132 | int i; | ||
133 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
134 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
135 | if (sh_chan) | ||
136 | chclr_write(sh_chan, 0); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); | ||
141 | |||
142 | dmaor = dmaor_read(shdev); | ||
143 | |||
144 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
145 | |||
146 | if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { | ||
147 | dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); | ||
148 | return -EIO; | ||
149 | } | ||
150 | if (shdev->pdata->dmaor_init & ~dmaor) | ||
151 | dev_warn(shdev->shdma_dev.dma_dev.dev, | ||
152 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", | ||
153 | dmaor, shdev->pdata->dmaor_init); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | ||
158 | { | ||
159 | u32 chcr = chcr_read(sh_chan); | ||
160 | |||
161 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) | ||
162 | return true; /* working */ | ||
163 | |||
164 | return false; /* waiting */ | ||
165 | } | ||
166 | |||
167 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | ||
168 | { | ||
169 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
170 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
171 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | ||
172 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | ||
173 | |||
174 | if (cnt >= pdata->ts_shift_num) | ||
175 | cnt = 0; | ||
176 | |||
177 | return pdata->ts_shift[cnt]; | ||
178 | } | ||
179 | |||
180 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | ||
181 | { | ||
182 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
183 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
184 | int i; | ||
185 | |||
186 | for (i = 0; i < pdata->ts_shift_num; i++) | ||
187 | if (pdata->ts_shift[i] == l2size) | ||
188 | break; | ||
189 | |||
190 | if (i == pdata->ts_shift_num) | ||
191 | i = 0; | ||
192 | |||
193 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | ||
194 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | ||
195 | } | ||
196 | |||
197 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | ||
198 | { | ||
199 | sh_dmae_writel(sh_chan, hw->sar, SAR); | ||
200 | sh_dmae_writel(sh_chan, hw->dar, DAR); | ||
201 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); | ||
202 | } | ||
203 | |||
204 | static void dmae_start(struct sh_dmae_chan *sh_chan) | ||
205 | { | ||
206 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
207 | u32 chcr = chcr_read(sh_chan); | ||
208 | |||
209 | if (shdev->pdata->needs_tend_set) | ||
210 | sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); | ||
211 | |||
212 | chcr |= CHCR_DE | shdev->chcr_ie_bit; | ||
213 | chcr_write(sh_chan, chcr & ~CHCR_TE); | ||
214 | } | ||
215 | |||
216 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
217 | { | ||
218 | /* | ||
219 | * Default configuration for dual address memory-memory transfer. | ||
220 | * 0x400 represents auto-request. | ||
221 | */ | ||
222 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | ||
223 | LOG2_DEFAULT_XFER_SIZE); | ||
224 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | ||
225 | chcr_write(sh_chan, chcr); | ||
226 | } | ||
227 | |||
228 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | ||
229 | { | ||
230 | /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ | ||
231 | if (dmae_is_busy(sh_chan)) | ||
232 | return -EBUSY; | ||
233 | |||
234 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); | ||
235 | chcr_write(sh_chan, val); | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | ||
241 | { | ||
242 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
243 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
244 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; | ||
245 | u16 __iomem *addr = shdev->dmars; | ||
246 | unsigned int shift = chan_pdata->dmars_bit; | ||
247 | |||
248 | if (dmae_is_busy(sh_chan)) | ||
249 | return -EBUSY; | ||
250 | |||
251 | if (pdata->no_dmars) | ||
252 | return 0; | ||
253 | |||
254 | /* in the case of a missing DMARS resource use first memory window */ | ||
255 | if (!addr) | ||
256 | addr = (u16 __iomem *)shdev->chan_reg; | ||
257 | addr += chan_pdata->dmars / sizeof(u16); | ||
258 | |||
259 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | ||
260 | addr); | ||
261 | |||
262 | return 0; | ||
263 | } | ||
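dmae_set_dmars() packs one 8-bit MID/RID value per channel into a shared 16-bit DMARS register while preserving the neighbouring channel's byte. A stand-alone sketch of that masking with made-up register contents (dmars_bit being 0 for the low byte, 8 for the high byte):

#include <stdio.h>
#include <stdint.h>

static uint16_t dmars = 0x1234;         /* pretend current DMARS contents */

static void set_dmars(unsigned int shift, uint8_t mid_rid)
{
	/* same masking as the driver: keep the other channel's byte, replace ours */
	dmars = (dmars & (0xff00 >> shift)) | ((uint16_t)mid_rid << shift);
}

int main(void)
{
	set_dmars(8, 0xc1);             /* program MID/RID 0xc1 into the high byte */
	printf("DMARS = 0x%04x\n", dmars);      /* prints 0xc134 */
	return 0;
}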
264 | |||
265 | static void sh_dmae_start_xfer(struct shdma_chan *schan, | ||
266 | struct shdma_desc *sdesc) | ||
267 | { | ||
268 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
269 | shdma_chan); | ||
270 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
271 | struct sh_dmae_desc, shdma_desc); | ||
272 | dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
273 | sdesc->async_tx.cookie, sh_chan->shdma_chan.id, | ||
274 | sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); | ||
275 | /* Get the ld start address from ld_queue */ | ||
276 | dmae_set_reg(sh_chan, &sh_desc->hw); | ||
277 | dmae_start(sh_chan); | ||
278 | } | ||
279 | |||
280 | static bool sh_dmae_channel_busy(struct shdma_chan *schan) | ||
281 | { | ||
282 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
283 | shdma_chan); | ||
284 | return dmae_is_busy(sh_chan); | ||
285 | } | ||
286 | |||
287 | static void sh_dmae_setup_xfer(struct shdma_chan *schan, | ||
288 | int slave_id) | ||
289 | { | ||
290 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
291 | shdma_chan); | ||
292 | |||
293 | if (slave_id >= 0) { | ||
294 | const struct sh_dmae_slave_config *cfg = | ||
295 | sh_chan->config; | ||
296 | |||
297 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
298 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
299 | } else { | ||
300 | dmae_init(sh_chan); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | static const struct sh_dmae_slave_config *dmae_find_slave( | ||
305 | struct sh_dmae_chan *sh_chan, int slave_id) | ||
306 | { | ||
307 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
308 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
309 | const struct sh_dmae_slave_config *cfg; | ||
310 | int i; | ||
311 | |||
312 | if (slave_id >= SH_DMA_SLAVE_NUMBER) | ||
313 | return NULL; | ||
314 | |||
315 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | ||
316 | if (cfg->slave_id == slave_id) | ||
317 | return cfg; | ||
318 | |||
319 | return NULL; | ||
320 | } | ||
321 | |||
322 | static int sh_dmae_set_slave(struct shdma_chan *schan, | ||
323 | int slave_id, bool try) | ||
324 | { | ||
325 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
326 | shdma_chan); | ||
327 | const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); | ||
328 | if (!cfg) | ||
329 | return -ENODEV; | ||
330 | |||
331 | if (!try) | ||
332 | sh_chan->config = cfg; | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | ||
338 | { | ||
339 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
340 | u32 chcr = chcr_read(sh_chan); | ||
341 | |||
342 | chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); | ||
343 | chcr_write(sh_chan, chcr); | ||
344 | } | ||
345 | |||
346 | static int sh_dmae_desc_setup(struct shdma_chan *schan, | ||
347 | struct shdma_desc *sdesc, | ||
348 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
349 | { | ||
350 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
351 | struct sh_dmae_desc, shdma_desc); | ||
352 | |||
353 | if (*len > schan->max_xfer_len) | ||
354 | *len = schan->max_xfer_len; | ||
355 | |||
356 | sh_desc->hw.sar = src; | ||
357 | sh_desc->hw.dar = dst; | ||
358 | sh_desc->hw.tcr = *len; | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static void sh_dmae_halt(struct shdma_chan *schan) | ||
364 | { | ||
365 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
366 | shdma_chan); | ||
367 | dmae_halt(sh_chan); | ||
368 | } | ||
369 | |||
370 | static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) | ||
371 | { | ||
372 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
373 | shdma_chan); | ||
374 | |||
375 | if (!(chcr_read(sh_chan) & CHCR_TE)) | ||
376 | return false; | ||
377 | |||
378 | /* DMA stop */ | ||
379 | dmae_halt(sh_chan); | ||
380 | |||
381 | return true; | ||
382 | } | ||
383 | |||
384 | static size_t sh_dmae_get_partial(struct shdma_chan *schan, | ||
385 | struct shdma_desc *sdesc) | ||
386 | { | ||
387 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
388 | shdma_chan); | ||
389 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
390 | struct sh_dmae_desc, shdma_desc); | ||
391 | return sh_desc->hw.tcr - (sh_dmae_readl(sh_chan, TCR) << | ||
392 | sh_chan->xmit_shift); | ||
393 | } | ||
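Since TCR is programmed in transfer units (bytes >> xmit_shift) while hw.tcr holds the byte length, the residue math has to scale the register value back to bytes before subtracting. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int hw_tcr = 4096;     /* descriptor length in bytes */
	unsigned int xmit_shift = 5;    /* 32-byte transfer units */
	unsigned int tcr_reg = 64;      /* TCR read back mid-transfer: 64 units left */

	/* bytes already transferred = total - remaining units * unit size */
	printf("partial = %u bytes\n", hw_tcr - (tcr_reg << xmit_shift)); /* 2048 */
	return 0;
}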
394 | |||
395 | /* Called from error IRQ or NMI */ | ||
396 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) | ||
397 | { | ||
398 | bool ret; | ||
399 | |||
400 | /* halt the dma controller */ | ||
401 | sh_dmae_ctl_stop(shdev); | ||
402 | |||
403 | /* We cannot detect which channel caused the error, so we have to reset all */ | ||
404 | ret = shdma_reset(&shdev->shdma_dev); | ||
405 | |||
406 | sh_dmae_rst(shdev); | ||
407 | |||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
412 | { | ||
413 | struct sh_dmae_device *shdev = data; | ||
414 | |||
415 | if (!(dmaor_read(shdev) & DMAOR_AE)) | ||
416 | return IRQ_NONE; | ||
417 | |||
418 | sh_dmae_reset(shdev); | ||
419 | return IRQ_HANDLED; | ||
420 | } | ||
421 | |||
422 | static bool sh_dmae_desc_completed(struct shdma_chan *schan, | ||
423 | struct shdma_desc *sdesc) | ||
424 | { | ||
425 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
426 | struct sh_dmae_chan, shdma_chan); | ||
427 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
428 | struct sh_dmae_desc, shdma_desc); | ||
429 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | ||
430 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | ||
431 | |||
432 | return (sdesc->direction == DMA_DEV_TO_MEM && | ||
433 | (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || | ||
434 | (sdesc->direction != DMA_DEV_TO_MEM && | ||
435 | (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); | ||
436 | } | ||
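Completion is judged purely from how far the address registers have advanced: once SAR (or DAR for device-to-memory transfers) has moved past the programmed start address plus the byte count, the descriptor is considered done. A tiny example with made-up addresses:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int sar = 0x48000000, tcr = 0x1000;    /* programmed SAR and byte count */
	unsigned int sar_now = 0x48001000;              /* SAR read back from the channel */
	bool done = (sar + tcr) == sar_now;

	printf("completed: %s\n", done ? "yes" : "no"); /* yes */
	return 0;
}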
437 | |||
438 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | ||
439 | { | ||
440 | /* Fast path out if NMIF is not asserted for this controller */ | ||
441 | if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) | ||
442 | return false; | ||
443 | |||
444 | return sh_dmae_reset(shdev); | ||
445 | } | ||
446 | |||
447 | static int sh_dmae_nmi_handler(struct notifier_block *self, | ||
448 | unsigned long cmd, void *data) | ||
449 | { | ||
450 | struct sh_dmae_device *shdev; | ||
451 | int ret = NOTIFY_DONE; | ||
452 | bool triggered; | ||
453 | |||
454 | /* | ||
455 | * Only concern ourselves with NMI events. | ||
456 | * | ||
457 | * Normally we would check the die chain value, but as this needs | ||
458 | * to be architecture independent, check for NMI context instead. | ||
459 | */ | ||
460 | if (!in_nmi()) | ||
461 | return NOTIFY_DONE; | ||
462 | |||
463 | rcu_read_lock(); | ||
464 | list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { | ||
465 | /* | ||
466 | * Only stop if one of the controllers has NMIF asserted, | ||
467 | * we do not want to interfere with regular address error | ||
468 | * handling or NMI events that don't concern the DMACs. | ||
469 | */ | ||
470 | triggered = sh_dmae_nmi_notify(shdev); | ||
471 | if (triggered) | ||
472 | ret = NOTIFY_OK; | ||
473 | } | ||
474 | rcu_read_unlock(); | ||
475 | |||
476 | return ret; | ||
477 | } | ||
478 | |||
479 | static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { | ||
480 | .notifier_call = sh_dmae_nmi_handler, | ||
481 | |||
482 | /* Run before NMI debug handler and KGDB */ | ||
483 | .priority = 1, | ||
484 | }; | ||
485 | |||
486 | static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | ||
487 | int irq, unsigned long flags) | ||
488 | { | ||
489 | const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; | ||
490 | struct shdma_dev *sdev = &shdev->shdma_dev; | ||
491 | struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); | ||
492 | struct sh_dmae_chan *sh_chan; | ||
493 | struct shdma_chan *schan; | ||
494 | int err; | ||
495 | |||
496 | sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | ||
497 | if (!sh_chan) { | ||
498 | dev_err(sdev->dma_dev.dev, | ||
499 | "No free memory for allocating dma channels!\n"); | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
503 | schan = &sh_chan->shdma_chan; | ||
504 | schan->max_xfer_len = SH_DMA_TCR_MAX + 1; | ||
505 | |||
506 | shdma_chan_probe(sdev, schan, id); | ||
507 | |||
508 | sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
509 | |||
510 | /* set up channel irq */ | ||
511 | if (pdev->id >= 0) | ||
512 | snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), | ||
513 | "sh-dmae%d.%d", pdev->id, id); | ||
514 | else | ||
515 | snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), | ||
516 | "sh-dma%d", id); | ||
517 | |||
518 | err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); | ||
519 | if (err) { | ||
520 | dev_err(sdev->dma_dev.dev, | ||
521 | "DMA channel %d request_irq error %d\n", | ||
522 | id, err); | ||
523 | goto err_no_irq; | ||
524 | } | ||
525 | |||
526 | shdev->chan[id] = sh_chan; | ||
527 | return 0; | ||
528 | |||
529 | err_no_irq: | ||
530 | /* remove from dmaengine device node */ | ||
531 | shdma_chan_remove(schan); | ||
532 | kfree(sh_chan); | ||
533 | return err; | ||
534 | } | ||
535 | |||
536 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | ||
537 | { | ||
538 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | ||
539 | struct shdma_chan *schan; | ||
540 | int i; | ||
541 | |||
542 | shdma_for_each_chan(schan, &shdev->shdma_dev, i) { | ||
543 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
544 | struct sh_dmae_chan, shdma_chan); | ||
545 | BUG_ON(!schan); | ||
546 | |||
547 | shdma_free_irq(&sh_chan->shdma_chan); | ||
548 | |||
549 | shdma_chan_remove(schan); | ||
550 | kfree(sh_chan); | ||
551 | } | ||
552 | dma_dev->chancnt = 0; | ||
553 | } | ||
554 | |||
555 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
556 | { | ||
557 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
558 | sh_dmae_ctl_stop(shdev); | ||
559 | } | ||
560 | |||
561 | static int sh_dmae_runtime_suspend(struct device *dev) | ||
562 | { | ||
563 | return 0; | ||
564 | } | ||
565 | |||
566 | static int sh_dmae_runtime_resume(struct device *dev) | ||
567 | { | ||
568 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
569 | |||
570 | return sh_dmae_rst(shdev); | ||
571 | } | ||
572 | |||
573 | #ifdef CONFIG_PM | ||
574 | static int sh_dmae_suspend(struct device *dev) | ||
575 | { | ||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | static int sh_dmae_resume(struct device *dev) | ||
580 | { | ||
581 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
582 | int i, ret; | ||
583 | |||
584 | ret = sh_dmae_rst(shdev); | ||
585 | if (ret < 0) | ||
586 | dev_err(dev, "Failed to reset!\n"); | ||
587 | |||
588 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
589 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
590 | |||
591 | if (!sh_chan->shdma_chan.desc_num) | ||
592 | continue; | ||
593 | |||
594 | if (sh_chan->shdma_chan.slave_id >= 0) { | ||
595 | const struct sh_dmae_slave_config *cfg = sh_chan->config; | ||
596 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
597 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
598 | } else { | ||
599 | dmae_init(sh_chan); | ||
600 | } | ||
601 | } | ||
602 | |||
603 | return 0; | ||
604 | } | ||
605 | #else | ||
606 | #define sh_dmae_suspend NULL | ||
607 | #define sh_dmae_resume NULL | ||
608 | #endif | ||
609 | |||
610 | const struct dev_pm_ops sh_dmae_pm = { | ||
611 | .suspend = sh_dmae_suspend, | ||
612 | .resume = sh_dmae_resume, | ||
613 | .runtime_suspend = sh_dmae_runtime_suspend, | ||
614 | .runtime_resume = sh_dmae_runtime_resume, | ||
615 | }; | ||
616 | |||
617 | static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) | ||
618 | { | ||
619 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
620 | struct sh_dmae_chan, shdma_chan); | ||
621 | |||
622 | /* | ||
623 | * Implicit BUG_ON(!sh_chan->config) | ||
624 | * This is an exclusive slave DMA operation, may only be called after a | ||
625 | * successful slave configuration. | ||
626 | */ | ||
627 | return sh_chan->config->addr; | ||
628 | } | ||
629 | |||
630 | static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) | ||
631 | { | ||
632 | return &((struct sh_dmae_desc *)buf)[i].shdma_desc; | ||
633 | } | ||
634 | |||
635 | static const struct shdma_ops sh_dmae_shdma_ops = { | ||
636 | .desc_completed = sh_dmae_desc_completed, | ||
637 | .halt_channel = sh_dmae_halt, | ||
638 | .channel_busy = sh_dmae_channel_busy, | ||
639 | .slave_addr = sh_dmae_slave_addr, | ||
640 | .desc_setup = sh_dmae_desc_setup, | ||
641 | .set_slave = sh_dmae_set_slave, | ||
642 | .setup_xfer = sh_dmae_setup_xfer, | ||
643 | .start_xfer = sh_dmae_start_xfer, | ||
644 | .embedded_desc = sh_dmae_embedded_desc, | ||
645 | .chan_irq = sh_dmae_chan_irq, | ||
646 | .get_partial = sh_dmae_get_partial, | ||
647 | }; | ||
648 | |||
649 | static int sh_dmae_probe(struct platform_device *pdev) | ||
650 | { | ||
651 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | ||
652 | unsigned long irqflags = IRQF_DISABLED, | ||
653 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | ||
654 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; | ||
655 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | ||
656 | struct sh_dmae_device *shdev; | ||
657 | struct dma_device *dma_dev; | ||
658 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | ||
659 | |||
660 | /* get platform data */ | ||
661 | if (!pdata || !pdata->channel_num) | ||
662 | return -ENODEV; | ||
663 | |||
664 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
665 | /* DMARS area is optional */ | ||
666 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
667 | /* | ||
668 | * IRQ resources: | ||
669 | * 1. there must always be at least one IRQ IO-resource. On SH4 it is | ||
670 | * the error IRQ, in which case it is the only IRQ in this resource: | ||
671 | * start == end. If it is the only IRQ resource, all channels also | ||
672 | * use the same IRQ. | ||
673 | * 2. DMA channel IRQ resources can be specified one per resource or in | ||
674 | * ranges (start != end) | ||
675 | * 3. iff all events (channels and, optionally, error) on this | ||
676 | * controller use the same IRQ, only one IRQ resource can be | ||
677 | * specified, otherwise there must be one IRQ per channel, even if | ||
678 | * some of them are equal | ||
679 | * 4. if all IRQs on this controller are equal or if some specific IRQs | ||
680 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | ||
681 | * requested with the IRQF_SHARED flag | ||
682 | */ | ||
683 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
684 | if (!chan || !errirq_res) | ||
685 | return -ENODEV; | ||
686 | |||
687 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | ||
688 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | ||
689 | return -EBUSY; | ||
690 | } | ||
691 | |||
692 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
693 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
694 | err = -EBUSY; | ||
695 | goto ermrdmars; | ||
696 | } | ||
697 | |||
698 | err = -ENOMEM; | ||
699 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | ||
700 | if (!shdev) { | ||
701 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
702 | goto ealloc; | ||
703 | } | ||
704 | |||
705 | dma_dev = &shdev->shdma_dev.dma_dev; | ||
706 | |||
707 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | ||
708 | if (!shdev->chan_reg) | ||
709 | goto emapchan; | ||
710 | if (dmars) { | ||
711 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | ||
712 | if (!shdev->dmars) | ||
713 | goto emapdmars; | ||
714 | } | ||
715 | |||
716 | if (!pdata->slave_only) | ||
717 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
718 | if (pdata->slave && pdata->slave_num) | ||
719 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
720 | |||
721 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | ||
722 | dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
723 | |||
724 | shdev->shdma_dev.ops = &sh_dmae_shdma_ops; | ||
725 | shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); | ||
726 | err = shdma_init(&pdev->dev, &shdev->shdma_dev, | ||
727 | pdata->channel_num); | ||
728 | if (err < 0) | ||
729 | goto eshdma; | ||
730 | |||
731 | /* platform data */ | ||
732 | shdev->pdata = pdev->dev.platform_data; | ||
733 | |||
734 | if (pdata->chcr_offset) | ||
735 | shdev->chcr_offset = pdata->chcr_offset; | ||
736 | else | ||
737 | shdev->chcr_offset = CHCR; | ||
738 | |||
739 | if (pdata->chcr_ie_bit) | ||
740 | shdev->chcr_ie_bit = pdata->chcr_ie_bit; | ||
741 | else | ||
742 | shdev->chcr_ie_bit = CHCR_IE; | ||
743 | |||
744 | platform_set_drvdata(pdev, shdev); | ||
745 | |||
746 | pm_runtime_enable(&pdev->dev); | ||
747 | err = pm_runtime_get_sync(&pdev->dev); | ||
748 | if (err < 0) | ||
749 | dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); | ||
750 | |||
751 | spin_lock_irq(&sh_dmae_lock); | ||
752 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | ||
753 | spin_unlock_irq(&sh_dmae_lock); | ||
754 | |||
755 | /* reset dma controller - only needed as a test */ | ||
756 | err = sh_dmae_rst(shdev); | ||
757 | if (err) | ||
758 | goto rst_err; | ||
759 | |||
760 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
761 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | ||
762 | |||
763 | if (!chanirq_res) | ||
764 | chanirq_res = errirq_res; | ||
765 | else | ||
766 | irqres++; | ||
767 | |||
768 | if (chanirq_res == errirq_res || | ||
769 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | ||
770 | irqflags = IRQF_SHARED; | ||
771 | |||
772 | errirq = errirq_res->start; | ||
773 | |||
774 | err = request_irq(errirq, sh_dmae_err, irqflags, | ||
775 | "DMAC Address Error", shdev); | ||
776 | if (err) { | ||
777 | dev_err(&pdev->dev, | ||
778 | "DMA failed requesting irq #%d, error %d\n", | ||
779 | errirq, err); | ||
780 | goto eirq_err; | ||
781 | } | ||
782 | |||
783 | #else | ||
784 | chanirq_res = errirq_res; | ||
785 | #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ | ||
786 | |||
787 | if (chanirq_res->start == chanirq_res->end && | ||
788 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | ||
789 | /* Special case - all multiplexed */ | ||
790 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | ||
791 | if (irq_cnt < SH_DMAE_MAX_CHANNELS) { | ||
792 | chan_irq[irq_cnt] = chanirq_res->start; | ||
793 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
794 | } else { | ||
795 | irq_cap = 1; | ||
796 | break; | ||
797 | } | ||
798 | } | ||
799 | } else { | ||
800 | do { | ||
801 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | ||
802 | if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { | ||
803 | irq_cap = 1; | ||
804 | break; | ||
805 | } | ||
806 | |||
807 | if ((errirq_res->flags & IORESOURCE_BITS) == | ||
808 | IORESOURCE_IRQ_SHAREABLE) | ||
809 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
810 | else | ||
811 | chan_flag[irq_cnt] = IRQF_DISABLED; | ||
812 | dev_dbg(&pdev->dev, | ||
813 | "Found IRQ %d for channel %d\n", | ||
814 | i, irq_cnt); | ||
815 | chan_irq[irq_cnt++] = i; | ||
816 | } | ||
817 | |||
818 | if (irq_cnt >= SH_DMAE_MAX_CHANNELS) | ||
819 | break; | ||
820 | |||
821 | chanirq_res = platform_get_resource(pdev, | ||
822 | IORESOURCE_IRQ, ++irqres); | ||
823 | } while (irq_cnt < pdata->channel_num && chanirq_res); | ||
824 | } | ||
825 | |||
826 | /* Create DMA Channel */ | ||
827 | for (i = 0; i < irq_cnt; i++) { | ||
828 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); | ||
829 | if (err) | ||
830 | goto chan_probe_err; | ||
831 | } | ||
832 | |||
833 | if (irq_cap) | ||
834 | dev_notice(&pdev->dev, "Attempting to register %d DMA " | ||
835 | "channels when a maximum of %d are supported.\n", | ||
836 | pdata->channel_num, SH_DMAE_MAX_CHANNELS); | ||
837 | |||
838 | pm_runtime_put(&pdev->dev); | ||
839 | |||
840 | err = dma_async_device_register(&shdev->shdma_dev.dma_dev); | ||
841 | if (err < 0) | ||
842 | goto edmadevreg; | ||
843 | |||
844 | return err; | ||
845 | |||
846 | edmadevreg: | ||
847 | pm_runtime_get(&pdev->dev); | ||
848 | |||
849 | chan_probe_err: | ||
850 | sh_dmae_chan_remove(shdev); | ||
851 | |||
852 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
853 | free_irq(errirq, shdev); | ||
854 | eirq_err: | ||
855 | #endif | ||
856 | rst_err: | ||
857 | spin_lock_irq(&sh_dmae_lock); | ||
858 | list_del_rcu(&shdev->node); | ||
859 | spin_unlock_irq(&sh_dmae_lock); | ||
860 | |||
861 | pm_runtime_put(&pdev->dev); | ||
862 | pm_runtime_disable(&pdev->dev); | ||
863 | |||
864 | platform_set_drvdata(pdev, NULL); | ||
865 | shdma_cleanup(&shdev->shdma_dev); | ||
866 | eshdma: | ||
867 | if (dmars) | ||
868 | iounmap(shdev->dmars); | ||
869 | emapdmars: | ||
870 | iounmap(shdev->chan_reg); | ||
871 | synchronize_rcu(); | ||
872 | emapchan: | ||
873 | kfree(shdev); | ||
874 | ealloc: | ||
875 | if (dmars) | ||
876 | release_mem_region(dmars->start, resource_size(dmars)); | ||
877 | ermrdmars: | ||
878 | release_mem_region(chan->start, resource_size(chan)); | ||
879 | |||
880 | return err; | ||
881 | } | ||
882 | |||
883 | static int sh_dmae_remove(struct platform_device *pdev) | ||
884 | { | ||
885 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
886 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | ||
887 | struct resource *res; | ||
888 | int errirq = platform_get_irq(pdev, 0); | ||
889 | |||
890 | dma_async_device_unregister(dma_dev); | ||
891 | |||
892 | if (errirq > 0) | ||
893 | free_irq(errirq, shdev); | ||
894 | |||
895 | spin_lock_irq(&sh_dmae_lock); | ||
896 | list_del_rcu(&shdev->node); | ||
897 | spin_unlock_irq(&sh_dmae_lock); | ||
898 | |||
899 | pm_runtime_disable(&pdev->dev); | ||
900 | |||
901 | sh_dmae_chan_remove(shdev); | ||
902 | shdma_cleanup(&shdev->shdma_dev); | ||
903 | |||
904 | if (shdev->dmars) | ||
905 | iounmap(shdev->dmars); | ||
906 | iounmap(shdev->chan_reg); | ||
907 | |||
908 | platform_set_drvdata(pdev, NULL); | ||
909 | |||
910 | synchronize_rcu(); | ||
911 | kfree(shdev); | ||
912 | |||
913 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
914 | if (res) | ||
915 | release_mem_region(res->start, resource_size(res)); | ||
916 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
917 | if (res) | ||
918 | release_mem_region(res->start, resource_size(res)); | ||
919 | |||
920 | return 0; | ||
921 | } | ||
922 | |||
923 | static struct platform_driver sh_dmae_driver = { | ||
924 | .driver = { | ||
925 | .owner = THIS_MODULE, | ||
926 | .pm = &sh_dmae_pm, | ||
927 | .name = SH_DMAE_DRV_NAME, | ||
928 | }, | ||
929 | .remove = sh_dmae_remove, | ||
930 | .shutdown = sh_dmae_shutdown, | ||
931 | }; | ||
932 | |||
933 | static int __init sh_dmae_init(void) | ||
934 | { | ||
935 | /* Wire up NMI handling */ | ||
936 | int err = register_die_notifier(&sh_dmae_nmi_notifier); | ||
937 | if (err) | ||
938 | return err; | ||
939 | |||
940 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | ||
941 | } | ||
942 | module_init(sh_dmae_init); | ||
943 | |||
944 | static void __exit sh_dmae_exit(void) | ||
945 | { | ||
946 | platform_driver_unregister(&sh_dmae_driver); | ||
947 | |||
948 | unregister_die_notifier(&sh_dmae_nmi_notifier); | ||
949 | } | ||
950 | module_exit(sh_dmae_exit); | ||
951 | |||
952 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | ||
953 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | ||
954 | MODULE_LICENSE("GPL"); | ||
955 | MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME); | ||
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h deleted file mode 100644 index 9314e93225d..00000000000 --- a/drivers/dma/sh/shdma.h +++ /dev/null | |||
@@ -1,64 +0,0 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
5 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
6 | * | ||
7 | * This is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef __DMA_SHDMA_H | ||
14 | #define __DMA_SHDMA_H | ||
15 | |||
16 | #include <linux/sh_dma.h> | ||
17 | #include <linux/shdma-base.h> | ||
18 | #include <linux/dmaengine.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/list.h> | ||
21 | |||
22 | #define SH_DMAE_MAX_CHANNELS 20 | ||
23 | #define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */ | ||
24 | |||
25 | struct device; | ||
26 | |||
27 | struct sh_dmae_chan { | ||
28 | struct shdma_chan shdma_chan; | ||
29 | const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ | ||
30 | int xmit_shift; /* log_2(bytes_per_xfer) */ | ||
31 | u32 __iomem *base; | ||
32 | char dev_id[16]; /* unique name per DMAC of channel */ | ||
33 | int pm_error; | ||
34 | }; | ||
35 | |||
36 | struct sh_dmae_device { | ||
37 | struct shdma_dev shdma_dev; | ||
38 | struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; | ||
39 | struct sh_dmae_pdata *pdata; | ||
40 | struct list_head node; | ||
41 | u32 __iomem *chan_reg; | ||
42 | u16 __iomem *dmars; | ||
43 | unsigned int chcr_offset; | ||
44 | u32 chcr_ie_bit; | ||
45 | }; | ||
46 | |||
47 | struct sh_dmae_regs { | ||
48 | u32 sar; /* SAR / source address */ | ||
49 | u32 dar; /* DAR / destination address */ | ||
50 | u32 tcr; /* TCR / transfer count */ | ||
51 | }; | ||
52 | |||
53 | struct sh_dmae_desc { | ||
54 | struct sh_dmae_regs hw; | ||
55 | struct shdma_desc shdma_desc; | ||
56 | }; | ||
57 | |||
58 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan) | ||
59 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) | ||
60 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) | ||
61 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ | ||
62 | struct sh_dmae_device, shdma_dev.dma_dev) | ||
63 | |||
64 | #endif /* __DMA_SHDMA_H */ | ||
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c deleted file mode 100644 index 94674a96c64..00000000000 --- a/drivers/dma/sirf-dma.c +++ /dev/null | |||
@@ -1,689 +0,0 @@ | |||
1 | /* | ||
2 | * DMA controller driver for CSR SiRFprimaII | ||
3 | * | ||
4 | * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. | ||
5 | * | ||
6 | * Licensed under GPLv2 or later. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/dmaengine.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/of_platform.h> | ||
19 | #include <linux/sirfsoc_dma.h> | ||
20 | |||
21 | #include "dmaengine.h" | ||
22 | |||
23 | #define SIRFSOC_DMA_DESCRIPTORS 16 | ||
24 | #define SIRFSOC_DMA_CHANNELS 16 | ||
25 | |||
26 | #define SIRFSOC_DMA_CH_ADDR 0x00 | ||
27 | #define SIRFSOC_DMA_CH_XLEN 0x04 | ||
28 | #define SIRFSOC_DMA_CH_YLEN 0x08 | ||
29 | #define SIRFSOC_DMA_CH_CTRL 0x0C | ||
30 | |||
31 | #define SIRFSOC_DMA_WIDTH_0 0x100 | ||
32 | #define SIRFSOC_DMA_CH_VALID 0x140 | ||
33 | #define SIRFSOC_DMA_CH_INT 0x144 | ||
34 | #define SIRFSOC_DMA_INT_EN 0x148 | ||
35 | #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 | ||
36 | |||
37 | #define SIRFSOC_DMA_MODE_CTRL_BIT 4 | ||
38 | #define SIRFSOC_DMA_DIR_CTRL_BIT 5 | ||
39 | |||
40 | /* the xlen and dma_width registers are on a 4-byte boundary */ | ||
41 | #define SIRFSOC_DMA_WORD_LEN 4 | ||
42 | |||
43 | struct sirfsoc_dma_desc { | ||
44 | struct dma_async_tx_descriptor desc; | ||
45 | struct list_head node; | ||
46 | |||
47 | /* SiRFprimaII 2D-DMA parameters */ | ||
48 | |||
49 | int xlen; /* DMA xlen */ | ||
50 | int ylen; /* DMA ylen */ | ||
51 | int width; /* DMA width */ | ||
52 | int dir; | ||
53 | bool cyclic; /* is loop DMA? */ | ||
54 | u32 addr; /* DMA buffer address */ | ||
55 | }; | ||
56 | |||
57 | struct sirfsoc_dma_chan { | ||
58 | struct dma_chan chan; | ||
59 | struct list_head free; | ||
60 | struct list_head prepared; | ||
61 | struct list_head queued; | ||
62 | struct list_head active; | ||
63 | struct list_head completed; | ||
64 | unsigned long happened_cyclic; | ||
65 | unsigned long completed_cyclic; | ||
66 | |||
67 | /* Lock for this structure */ | ||
68 | spinlock_t lock; | ||
69 | |||
70 | int mode; | ||
71 | }; | ||
72 | |||
73 | struct sirfsoc_dma { | ||
74 | struct dma_device dma; | ||
75 | struct tasklet_struct tasklet; | ||
76 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; | ||
77 | void __iomem *base; | ||
78 | int irq; | ||
79 | }; | ||
80 | |||
81 | #define DRV_NAME "sirfsoc_dma" | ||
82 | |||
83 | /* Convert struct dma_chan to struct sirfsoc_dma_chan */ | ||
84 | static inline | ||
85 | struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) | ||
86 | { | ||
87 | return container_of(c, struct sirfsoc_dma_chan, chan); | ||
88 | } | ||
89 | |||
90 | /* Convert struct dma_chan to struct sirfsoc_dma */ | ||
91 | static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) | ||
92 | { | ||
93 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c); | ||
94 | return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); | ||
95 | } | ||
96 | |||
97 | /* Execute all queued DMA descriptors */ | ||
98 | static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) | ||
99 | { | ||
100 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
101 | int cid = schan->chan.chan_id; | ||
102 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
103 | |||
104 | /* | ||
105 | * The lock is already held by the functions calling this, so we | ||
106 | * don't take it again | ||
107 | */ | ||
108 | |||
109 | sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, | ||
110 | node); | ||
111 | /* Move the first queued descriptor to active list */ | ||
112 | list_move_tail(&sdesc->node, &schan->active); | ||
113 | |||
114 | /* Start the DMA transfer */ | ||
115 | writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + | ||
116 | cid * 4); | ||
117 | writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) | | ||
118 | (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), | ||
119 | sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); | ||
120 | writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + | ||
121 | SIRFSOC_DMA_CH_XLEN); | ||
122 | writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + | ||
123 | SIRFSOC_DMA_CH_YLEN); | ||
124 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | | ||
125 | (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
126 | |||
127 | /* | ||
128 | * writel has an implicit memory write barrier to make sure data is | ||
129 | * flushed into memory before starting DMA | ||
130 | */ | ||
131 | writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); | ||
132 | |||
133 | if (sdesc->cyclic) { | ||
134 | writel((1 << cid) | 1 << (cid + 16) | | ||
135 | readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL), | ||
136 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
137 | schan->happened_cyclic = schan->completed_cyclic = 0; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | /* Interrupt handler */ | ||
142 | static irqreturn_t sirfsoc_dma_irq(int irq, void *data) | ||
143 | { | ||
144 | struct sirfsoc_dma *sdma = data; | ||
145 | struct sirfsoc_dma_chan *schan; | ||
146 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
147 | u32 is; | ||
148 | int ch; | ||
149 | |||
150 | is = readl(sdma->base + SIRFSOC_DMA_CH_INT); | ||
151 | while ((ch = fls(is) - 1) >= 0) { | ||
152 | is &= ~(1 << ch); | ||
153 | writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); | ||
154 | schan = &sdma->channels[ch]; | ||
155 | |||
156 | spin_lock(&schan->lock); | ||
157 | |||
158 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
159 | node); | ||
160 | if (!sdesc->cyclic) { | ||
161 | /* Execute queued descriptors */ | ||
162 | list_splice_tail_init(&schan->active, &schan->completed); | ||
163 | if (!list_empty(&schan->queued)) | ||
164 | sirfsoc_dma_execute(schan); | ||
165 | } else | ||
166 | schan->happened_cyclic++; | ||
167 | |||
168 | spin_unlock(&schan->lock); | ||
169 | } | ||
170 | |||
171 | /* Schedule tasklet */ | ||
172 | tasklet_schedule(&sdma->tasklet); | ||
173 | |||
174 | return IRQ_HANDLED; | ||
175 | } | ||
176 | |||
177 | /* process completed descriptors */ | ||
178 | static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) | ||
179 | { | ||
180 | dma_cookie_t last_cookie = 0; | ||
181 | struct sirfsoc_dma_chan *schan; | ||
182 | struct sirfsoc_dma_desc *sdesc; | ||
183 | struct dma_async_tx_descriptor *desc; | ||
184 | unsigned long flags; | ||
185 | unsigned long happened_cyclic; | ||
186 | LIST_HEAD(list); | ||
187 | int i; | ||
188 | |||
189 | for (i = 0; i < sdma->dma.chancnt; i++) { | ||
190 | schan = &sdma->channels[i]; | ||
191 | |||
192 | /* Get all completed descriptors */ | ||
193 | spin_lock_irqsave(&schan->lock, flags); | ||
194 | if (!list_empty(&schan->completed)) { | ||
195 | list_splice_tail_init(&schan->completed, &list); | ||
196 | spin_unlock_irqrestore(&schan->lock, flags); | ||
197 | |||
198 | /* Execute callbacks and run dependencies */ | ||
199 | list_for_each_entry(sdesc, &list, node) { | ||
200 | desc = &sdesc->desc; | ||
201 | |||
202 | if (desc->callback) | ||
203 | desc->callback(desc->callback_param); | ||
204 | |||
205 | last_cookie = desc->cookie; | ||
206 | dma_run_dependencies(desc); | ||
207 | } | ||
208 | |||
209 | /* Free descriptors */ | ||
210 | spin_lock_irqsave(&schan->lock, flags); | ||
211 | list_splice_tail_init(&list, &schan->free); | ||
212 | schan->chan.completed_cookie = last_cookie; | ||
213 | spin_unlock_irqrestore(&schan->lock, flags); | ||
214 | } else { | ||
215 | /* for cyclic channel, desc is always in active list */ | ||
216 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
217 | node); | ||
218 | |||
219 | if (!sdesc || (sdesc && !sdesc->cyclic)) { | ||
220 | /* without active cyclic DMA */ | ||
221 | spin_unlock_irqrestore(&schan->lock, flags); | ||
222 | continue; | ||
223 | } | ||
224 | |||
225 | /* cyclic DMA */ | ||
226 | happened_cyclic = schan->happened_cyclic; | ||
227 | spin_unlock_irqrestore(&schan->lock, flags); | ||
228 | |||
229 | desc = &sdesc->desc; | ||
230 | while (happened_cyclic != schan->completed_cyclic) { | ||
231 | if (desc->callback) | ||
232 | desc->callback(desc->callback_param); | ||
233 | schan->completed_cyclic++; | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | } | ||
238 | |||
239 | /* DMA Tasklet */ | ||
240 | static void sirfsoc_dma_tasklet(unsigned long data) | ||
241 | { | ||
242 | struct sirfsoc_dma *sdma = (void *)data; | ||
243 | |||
244 | sirfsoc_dma_process_completed(sdma); | ||
245 | } | ||
246 | |||
247 | /* Submit descriptor to hardware */ | ||
248 | static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
249 | { | ||
250 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan); | ||
251 | struct sirfsoc_dma_desc *sdesc; | ||
252 | unsigned long flags; | ||
253 | dma_cookie_t cookie; | ||
254 | |||
255 | sdesc = container_of(txd, struct sirfsoc_dma_desc, desc); | ||
256 | |||
257 | spin_lock_irqsave(&schan->lock, flags); | ||
258 | |||
259 | /* Move descriptor to queue */ | ||
260 | list_move_tail(&sdesc->node, &schan->queued); | ||
261 | |||
262 | cookie = dma_cookie_assign(txd); | ||
263 | |||
264 | spin_unlock_irqrestore(&schan->lock, flags); | ||
265 | |||
266 | return cookie; | ||
267 | } | ||
268 | |||
269 | static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, | ||
270 | struct dma_slave_config *config) | ||
271 | { | ||
272 | unsigned long flags; | ||
273 | |||
274 | if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
275 | (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
276 | return -EINVAL; | ||
277 | |||
278 | spin_lock_irqsave(&schan->lock, flags); | ||
279 | schan->mode = (config->src_maxburst == 4 ? 1 : 0); | ||
280 | spin_unlock_irqrestore(&schan->lock, flags); | ||
281 | |||
282 | return 0; | ||
283 | } | ||
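From the client side this maps onto the usual dmaengine slave-configuration call; the only knobs this controller honours are the mandatory 4-byte bus width and src_maxburst, which selects the channel mode. A minimal sketch, not taken from any real client driver:

#include <linux/dmaengine.h>

static int example_configure(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 4,    /* maxburst of 4 selects mode 1 */
	};

	/* reaches sirfsoc_dma_slave_config() via the DMA_SLAVE_CONFIG control op */
	return dmaengine_slave_config(chan, &cfg);
}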
284 | |||
285 | static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) | ||
286 | { | ||
287 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
288 | int cid = schan->chan.chan_id; | ||
289 | unsigned long flags; | ||
290 | |||
291 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & | ||
292 | ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
293 | writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); | ||
294 | |||
295 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | ||
296 | & ~((1 << cid) | 1 << (cid + 16)), | ||
297 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
298 | |||
299 | spin_lock_irqsave(&schan->lock, flags); | ||
300 | list_splice_tail_init(&schan->active, &schan->free); | ||
301 | list_splice_tail_init(&schan->queued, &schan->free); | ||
302 | spin_unlock_irqrestore(&schan->lock, flags); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
308 | unsigned long arg) | ||
309 | { | ||
310 | struct dma_slave_config *config; | ||
311 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
312 | |||
313 | switch (cmd) { | ||
314 | case DMA_TERMINATE_ALL: | ||
315 | return sirfsoc_dma_terminate_all(schan); | ||
316 | case DMA_SLAVE_CONFIG: | ||
317 | config = (struct dma_slave_config *)arg; | ||
318 | return sirfsoc_dma_slave_config(schan, config); | ||
319 | |||
320 | default: | ||
321 | break; | ||
322 | } | ||
323 | |||
324 | return -ENOSYS; | ||
325 | } | ||
326 | |||
327 | /* Alloc channel resources */ | ||
328 | static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) | ||
329 | { | ||
330 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
331 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
332 | struct sirfsoc_dma_desc *sdesc; | ||
333 | unsigned long flags; | ||
334 | LIST_HEAD(descs); | ||
335 | int i; | ||
336 | |||
337 | /* Alloc descriptors for this channel */ | ||
338 | for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { | ||
339 | sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); | ||
340 | if (!sdesc) { | ||
341 | dev_notice(sdma->dma.dev, "Memory allocation error. " | ||
342 | "Allocated only %u descriptors\n", i); | ||
343 | break; | ||
344 | } | ||
345 | |||
346 | dma_async_tx_descriptor_init(&sdesc->desc, chan); | ||
347 | sdesc->desc.flags = DMA_CTRL_ACK; | ||
348 | sdesc->desc.tx_submit = sirfsoc_dma_tx_submit; | ||
349 | |||
350 | list_add_tail(&sdesc->node, &descs); | ||
351 | } | ||
352 | |||
353 | /* Return error only if no descriptors were allocated */ | ||
354 | if (i == 0) | ||
355 | return -ENOMEM; | ||
356 | |||
357 | spin_lock_irqsave(&schan->lock, flags); | ||
358 | |||
359 | list_splice_tail_init(&descs, &schan->free); | ||
360 | spin_unlock_irqrestore(&schan->lock, flags); | ||
361 | |||
362 | return i; | ||
363 | } | ||
364 | |||
365 | /* Free channel resources */ | ||
366 | static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) | ||
367 | { | ||
368 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
369 | struct sirfsoc_dma_desc *sdesc, *tmp; | ||
370 | unsigned long flags; | ||
371 | LIST_HEAD(descs); | ||
372 | |||
373 | spin_lock_irqsave(&schan->lock, flags); | ||
374 | |||
375 | /* Channel must be idle */ | ||
376 | BUG_ON(!list_empty(&schan->prepared)); | ||
377 | BUG_ON(!list_empty(&schan->queued)); | ||
378 | BUG_ON(!list_empty(&schan->active)); | ||
379 | BUG_ON(!list_empty(&schan->completed)); | ||
380 | |||
381 | /* Move data */ | ||
382 | list_splice_tail_init(&schan->free, &descs); | ||
383 | |||
384 | spin_unlock_irqrestore(&schan->lock, flags); | ||
385 | |||
386 | /* Free descriptors */ | ||
387 | list_for_each_entry_safe(sdesc, tmp, &descs, node) | ||
388 | kfree(sdesc); | ||
389 | } | ||
390 | |||
391 | /* Send pending descriptor to hardware */ | ||
392 | static void sirfsoc_dma_issue_pending(struct dma_chan *chan) | ||
393 | { | ||
394 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
395 | unsigned long flags; | ||
396 | |||
397 | spin_lock_irqsave(&schan->lock, flags); | ||
398 | |||
399 | if (list_empty(&schan->active) && !list_empty(&schan->queued)) | ||
400 | sirfsoc_dma_execute(schan); | ||
401 | |||
402 | spin_unlock_irqrestore(&schan->lock, flags); | ||
403 | } | ||
404 | |||
405 | /* Check request completion status */ | ||
406 | static enum dma_status | ||
407 | sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | ||
408 | struct dma_tx_state *txstate) | ||
409 | { | ||
410 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
411 | unsigned long flags; | ||
412 | enum dma_status ret; | ||
413 | |||
414 | spin_lock_irqsave(&schan->lock, flags); | ||
415 | ret = dma_cookie_status(chan, cookie, txstate); | ||
416 | spin_unlock_irqrestore(&schan->lock, flags); | ||
417 | |||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( | ||
422 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
423 | unsigned long flags) | ||
424 | { | ||
425 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
426 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
427 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
428 | unsigned long iflags; | ||
429 | int ret; | ||
430 | |||
431 | if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) { | ||
432 | ret = -EINVAL; | ||
433 | goto err_dir; | ||
434 | } | ||
435 | |||
436 | /* Get free descriptor */ | ||
437 | spin_lock_irqsave(&schan->lock, iflags); | ||
438 | if (!list_empty(&schan->free)) { | ||
439 | sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, | ||
440 | node); | ||
441 | list_del(&sdesc->node); | ||
442 | } | ||
443 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
444 | |||
445 | if (!sdesc) { | ||
446 | /* try to free completed descriptors */ | ||
447 | sirfsoc_dma_process_completed(sdma); | ||
448 | ret = 0; | ||
449 | goto no_desc; | ||
450 | } | ||
451 | |||
452 | /* Place descriptor in prepared list */ | ||
453 | spin_lock_irqsave(&schan->lock, iflags); | ||
454 | |||
455 | /* | ||
456 | * The number of chunks in a frame can only be 1 for prima2, | ||
457 | * and ylen (the number of frames - 1) must be at least 0 | ||
458 | */ | ||
459 | if ((xt->frame_size == 1) && (xt->numf > 0)) { | ||
460 | sdesc->cyclic = 0; | ||
461 | sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN; | ||
462 | sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / | ||
463 | SIRFSOC_DMA_WORD_LEN; | ||
464 | sdesc->ylen = xt->numf - 1; | ||
465 | if (xt->dir == DMA_MEM_TO_DEV) { | ||
466 | sdesc->addr = xt->src_start; | ||
467 | sdesc->dir = 1; | ||
468 | } else { | ||
469 | sdesc->addr = xt->dst_start; | ||
470 | sdesc->dir = 0; | ||
471 | } | ||
472 | |||
473 | list_add_tail(&sdesc->node, &schan->prepared); | ||
474 | } else { | ||
475 | pr_err("sirfsoc DMA Invalid xfer\n"); | ||
476 | ret = -EINVAL; | ||
477 | goto err_xfer; | ||
478 | } | ||
479 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
480 | |||
481 | return &sdesc->desc; | ||
482 | err_xfer: | ||
483 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
484 | no_desc: | ||
485 | err_dir: | ||
486 | return ERR_PTR(ret); | ||
487 | } | ||
488 | |||
489 | static struct dma_async_tx_descriptor * | ||
490 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, | ||
491 | size_t buf_len, size_t period_len, | ||
492 | enum dma_transfer_direction direction, unsigned long flags, void *context) | ||
493 | { | ||
494 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
495 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
496 | unsigned long iflags; | ||
497 | |||
498 | /* | ||
499 | * we only support cyclic transfers with 2 periods | ||
500 | * If the X-length is set to 0, it would be the loop mode. | ||
501 | * The DMA address keeps increasing until reaching the end of a loop | ||
502 | * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then | ||
503 | * the DMA address goes back to the beginning of this area. | ||
504 | * In loop mode, the DMA data region is divided into two parts, BUFA | ||
505 | * and BUFB. DMA controller generates interrupts twice in each loop: | ||
506 | * when the DMA address reaches the end of BUFA or the end of the | ||
507 | * BUFB | ||
508 | */ | ||
509 | if (buf_len != 2 * period_len) | ||
510 | return ERR_PTR(-EINVAL); | ||
511 | |||
512 | /* Get free descriptor */ | ||
513 | spin_lock_irqsave(&schan->lock, iflags); | ||
514 | if (!list_empty(&schan->free)) { | ||
515 | sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, | ||
516 | node); | ||
517 | list_del(&sdesc->node); | ||
518 | } | ||
519 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
520 | |||
521 | if (!sdesc) | ||
522 | return 0; | ||
523 | |||
524 | /* Place descriptor in prepared list */ | ||
525 | spin_lock_irqsave(&schan->lock, iflags); | ||
526 | sdesc->addr = addr; | ||
527 | sdesc->cyclic = 1; | ||
528 | sdesc->xlen = 0; | ||
529 | sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1; | ||
530 | sdesc->width = 1; | ||
531 | list_add_tail(&sdesc->node, &schan->prepared); | ||
532 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
533 | |||
534 | return &sdesc->desc; | ||
535 | } | ||
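The comment above means a client must size its ring as exactly two periods (BUFA + BUFB); for example, a hypothetical 8 KiB audio-style ring must use 4 KiB periods. Shown here through the driver's own callback signature, although clients normally go through the dmaengine_prep_dma_cyclic() helper:

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *example_prep_ring(struct dma_chan *chan,
							  dma_addr_t buf)
{
	size_t buf_len = 8192;
	size_t period_len = buf_len / 2;        /* the driver rejects anything else */

	return chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						     period_len, DMA_DEV_TO_MEM,
						     0, NULL);
}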
536 | |||
537 | /* | ||
538 | * The DMA controller consists of 16 independent DMA channels. | ||
539 | * Each channel is allocated to a different function | ||
540 | */ | ||
541 | bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) | ||
542 | { | ||
543 | unsigned int ch_nr = (unsigned int) chan_id; | ||
544 | |||
545 | if (ch_nr == chan->chan_id + | ||
546 | chan->device->dev_id * SIRFSOC_DMA_CHANNELS) | ||
547 | return true; | ||
548 | |||
549 | return false; | ||
550 | } | ||
551 | EXPORT_SYMBOL(sirfsoc_dma_filter_id); | ||
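A client claims a specific channel by encoding (DMAC id * 16 + channel number) into the filter parameter. A minimal sketch (channel number made up) of requesting channel 12 of DMAC 0:

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *example_request_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* matches when chan_id + dev_id * SIRFSOC_DMA_CHANNELS == 12 */
	return dma_request_channel(mask, sirfsoc_dma_filter_id,
				   (void *)(unsigned long)12);
}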
552 | |||
553 | static int sirfsoc_dma_probe(struct platform_device *op) | ||
554 | { | ||
555 | struct device_node *dn = op->dev.of_node; | ||
556 | struct device *dev = &op->dev; | ||
557 | struct dma_device *dma; | ||
558 | struct sirfsoc_dma *sdma; | ||
559 | struct sirfsoc_dma_chan *schan; | ||
560 | struct resource res; | ||
561 | ulong regs_start, regs_size; | ||
562 | u32 id; | ||
563 | int ret, i; | ||
564 | |||
565 | sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); | ||
566 | if (!sdma) { | ||
567 | dev_err(dev, "Memory exhausted!\n"); | ||
568 | return -ENOMEM; | ||
569 | } | ||
570 | |||
571 | if (of_property_read_u32(dn, "cell-index", &id)) { | ||
572 | dev_err(dev, "Fail to get DMAC index\n"); | ||
573 | return -ENODEV; | ||
574 | } | ||
575 | |||
576 | sdma->irq = irq_of_parse_and_map(dn, 0); | ||
577 | if (sdma->irq == NO_IRQ) { | ||
578 | dev_err(dev, "Error mapping IRQ!\n"); | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | |||
582 | ret = of_address_to_resource(dn, 0, &res); | ||
583 | if (ret) { | ||
584 | dev_err(dev, "Error parsing memory region!\n"); | ||
585 | goto irq_dispose; | ||
586 | } | ||
587 | |||
588 | regs_start = res.start; | ||
589 | regs_size = resource_size(&res); | ||
590 | |||
591 | sdma->base = devm_ioremap(dev, regs_start, regs_size); | ||
592 | if (!sdma->base) { | ||
593 | dev_err(dev, "Error mapping memory region!\n"); | ||
594 | ret = -ENOMEM; | ||
595 | goto irq_dispose; | ||
596 | } | ||
597 | |||
598 | ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma); | ||
599 | if (ret) { | ||
600 | dev_err(dev, "Error requesting IRQ!\n"); | ||
601 | ret = -EINVAL; | ||
602 | goto irq_dispose; | ||
603 | } | ||
604 | |||
605 | dma = &sdma->dma; | ||
606 | dma->dev = dev; | ||
607 | dma->chancnt = SIRFSOC_DMA_CHANNELS; | ||
608 | |||
609 | dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; | ||
610 | dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; | ||
611 | dma->device_issue_pending = sirfsoc_dma_issue_pending; | ||
612 | dma->device_control = sirfsoc_dma_control; | ||
613 | dma->device_tx_status = sirfsoc_dma_tx_status; | ||
614 | dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; | ||
615 | dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; | ||
616 | |||
617 | INIT_LIST_HEAD(&dma->channels); | ||
618 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | ||
619 | dma_cap_set(DMA_CYCLIC, dma->cap_mask); | ||
620 | dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); | ||
621 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | ||
622 | |||
623 | for (i = 0; i < dma->chancnt; i++) { | ||
624 | schan = &sdma->channels[i]; | ||
625 | |||
626 | schan->chan.device = dma; | ||
627 | dma_cookie_init(&schan->chan); | ||
628 | |||
629 | INIT_LIST_HEAD(&schan->free); | ||
630 | INIT_LIST_HEAD(&schan->prepared); | ||
631 | INIT_LIST_HEAD(&schan->queued); | ||
632 | INIT_LIST_HEAD(&schan->active); | ||
633 | INIT_LIST_HEAD(&schan->completed); | ||
634 | |||
635 | spin_lock_init(&schan->lock); | ||
636 | list_add_tail(&schan->chan.device_node, &dma->channels); | ||
637 | } | ||
638 | |||
639 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); | ||
640 | |||
641 | /* Register DMA engine */ | ||
642 | dev_set_drvdata(dev, sdma); | ||
643 | ret = dma_async_device_register(dma); | ||
644 | if (ret) | ||
645 | goto free_irq; | ||
646 | |||
647 | dev_info(dev, "initialized SIRFSOC DMAC driver\n"); | ||
648 | |||
649 | return 0; | ||
650 | |||
651 | free_irq: | ||
652 | free_irq(sdma->irq, sdma); | ||
653 | irq_dispose: | ||
654 | irq_dispose_mapping(sdma->irq); | ||
655 | return ret; | ||
656 | } | ||
657 | |||
658 | static int sirfsoc_dma_remove(struct platform_device *op) | ||
659 | { | ||
660 | struct device *dev = &op->dev; | ||
661 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
662 | |||
663 | dma_async_device_unregister(&sdma->dma); | ||
664 | free_irq(sdma->irq, sdma); | ||
665 | irq_dispose_mapping(sdma->irq); | ||
666 | return 0; | ||
667 | } | ||
668 | |||
669 | static struct of_device_id sirfsoc_dma_match[] = { | ||
670 | { .compatible = "sirf,prima2-dmac", }, | ||
671 | {}, | ||
672 | }; | ||
673 | |||
674 | static struct platform_driver sirfsoc_dma_driver = { | ||
675 | .probe = sirfsoc_dma_probe, | ||
676 | .remove = sirfsoc_dma_remove, | ||
677 | .driver = { | ||
678 | .name = DRV_NAME, | ||
679 | .owner = THIS_MODULE, | ||
680 | .of_match_table = sirfsoc_dma_match, | ||
681 | }, | ||
682 | }; | ||
683 | |||
684 | module_platform_driver(sirfsoc_dma_driver); | ||
685 | |||
686 | MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " | ||
687 | "Barry Song <baohua.song@csr.com>"); | ||
688 | MODULE_DESCRIPTION("SIRFSOC DMA control driver"); | ||
689 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 23c5573e62d..467e4dcb20a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -9,19 +9,15 @@ | |||
9 | #include <linux/dma-mapping.h> | 9 | #include <linux/dma-mapping.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/export.h> | ||
13 | #include <linux/dmaengine.h> | 12 | #include <linux/dmaengine.h> |
14 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
16 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
17 | #include <linux/pm.h> | ||
18 | #include <linux/pm_runtime.h> | ||
19 | #include <linux/err.h> | 16 | #include <linux/err.h> |
20 | #include <linux/amba/bus.h> | 17 | #include <linux/amba/bus.h> |
21 | #include <linux/regulator/consumer.h> | ||
22 | #include <linux/platform_data/dma-ste-dma40.h> | ||
23 | 18 | ||
24 | #include "dmaengine.h" | 19 | #include <plat/ste_dma40.h> |
20 | |||
25 | #include "ste_dma40_ll.h" | 21 | #include "ste_dma40_ll.h" |
26 | 22 | ||
27 | #define D40_NAME "dma40" | 23 | #define D40_NAME "dma40" |
@@ -35,9 +31,6 @@ | |||
35 | /* Maximum iterations taken before giving up suspending a channel */ | 31 | /* Maximum iterations taken before giving up suspending a channel */ |
36 | #define D40_SUSPEND_MAX_IT 500 | 32 | #define D40_SUSPEND_MAX_IT 500 |
37 | 33 | ||
38 | /* Milliseconds */ | ||
39 | #define DMA40_AUTOSUSPEND_DELAY 100 | ||
40 | |||
41 | /* Hardware requirement on LCLA alignment */ | 34 | /* Hardware requirement on LCLA alignment */ |
42 | #define LCLA_ALIGNMENT 0x40000 | 35 | #define LCLA_ALIGNMENT 0x40000 |
43 | 36 | ||
@@ -68,71 +61,6 @@ enum d40_command { | |||
68 | D40_DMA_SUSPENDED = 3 | 61 | D40_DMA_SUSPENDED = 3 |
69 | }; | 62 | }; |
70 | 63 | ||
71 | /* | ||
72 | * enum d40_events - The different Event Enables for the event lines. | ||
73 | * | ||
74 | * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. | ||
75 | * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. | ||
76 | * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line. | ||
77 | * @D40_ROUND_EVENTLINE: Status check for event line. | ||
78 | */ | ||
79 | |||
80 | enum d40_events { | ||
81 | D40_DEACTIVATE_EVENTLINE = 0, | ||
82 | D40_ACTIVATE_EVENTLINE = 1, | ||
83 | D40_SUSPEND_REQ_EVENTLINE = 2, | ||
84 | D40_ROUND_EVENTLINE = 3 | ||
85 | }; | ||
86 | |||
87 | /* | ||
88 | * These are the registers that have to be saved and later restored | ||
89 | * when the DMA hw is powered off. | ||
90 | * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. | ||
91 | */ | ||
92 | static u32 d40_backup_regs[] = { | ||
93 | D40_DREG_LCPA, | ||
94 | D40_DREG_LCLA, | ||
95 | D40_DREG_PRMSE, | ||
96 | D40_DREG_PRMSO, | ||
97 | D40_DREG_PRMOE, | ||
98 | D40_DREG_PRMOO, | ||
99 | }; | ||
100 | |||
101 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) | ||
102 | |||
103 | /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ | ||
104 | static u32 d40_backup_regs_v3[] = { | ||
105 | D40_DREG_PSEG1, | ||
106 | D40_DREG_PSEG2, | ||
107 | D40_DREG_PSEG3, | ||
108 | D40_DREG_PSEG4, | ||
109 | D40_DREG_PCEG1, | ||
110 | D40_DREG_PCEG2, | ||
111 | D40_DREG_PCEG3, | ||
112 | D40_DREG_PCEG4, | ||
113 | D40_DREG_RSEG1, | ||
114 | D40_DREG_RSEG2, | ||
115 | D40_DREG_RSEG3, | ||
116 | D40_DREG_RSEG4, | ||
117 | D40_DREG_RCEG1, | ||
118 | D40_DREG_RCEG2, | ||
119 | D40_DREG_RCEG3, | ||
120 | D40_DREG_RCEG4, | ||
121 | }; | ||
122 | |||
123 | #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) | ||
124 | |||
125 | static u32 d40_backup_regs_chan[] = { | ||
126 | D40_CHAN_REG_SSCFG, | ||
127 | D40_CHAN_REG_SSELT, | ||
128 | D40_CHAN_REG_SSPTR, | ||
129 | D40_CHAN_REG_SSLNK, | ||
130 | D40_CHAN_REG_SDCFG, | ||
131 | D40_CHAN_REG_SDELT, | ||
132 | D40_CHAN_REG_SDPTR, | ||
133 | D40_CHAN_REG_SDLNK, | ||
134 | }; | ||
135 | |||
136 | /** | 64 | /** |
137 | * struct d40_lli_pool - Structure for keeping LLIs in memory | 65 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
138 | * | 66 | * |
@@ -167,7 +95,7 @@ struct d40_lli_pool { | |||
167 | * during a transfer. | 95 | * during a transfer. |
168 | * @node: List entry. | 96 | * @node: List entry. |
169 | * @is_in_client_list: true if the client owns this descriptor. | 97 | * @is_in_client_list: true if the client owns this descriptor. |
170 | * @cyclic: true if this is a cyclic job | 98 | * the previous one. |
171 | * | 99 | * |
172 | * This descriptor is used for both logical and physical transfers. | 100 | * This descriptor is used for both logical and physical transfers. |
173 | */ | 101 | */ |
@@ -214,7 +142,6 @@ struct d40_lcla_pool { | |||
214 | * channels. | 142 | * channels. |
215 | * | 143 | * |
216 | * @lock: A lock protection this entity. | 144 | * @lock: A lock protection this entity. |
217 | * @reserved: True if used by secure world or otherwise. | ||
218 | * @num: The physical channel number of this entity. | 145 | * @num: The physical channel number of this entity. |
219 | * @allocated_src: Bit mapped to show which src event line's are mapped to | 146 | * @allocated_src: Bit mapped to show which src event line's are mapped to |
220 | * this physical channel. Can also be free or physically allocated. | 147 | * this physical channel. Can also be free or physically allocated. |
@@ -224,7 +151,6 @@ struct d40_lcla_pool { | |||
224 | */ | 151 | */ |
225 | struct d40_phy_res { | 152 | struct d40_phy_res { |
226 | spinlock_t lock; | 153 | spinlock_t lock; |
227 | bool reserved; | ||
228 | int num; | 154 | int num; |
229 | u32 allocated_src; | 155 | u32 allocated_src; |
230 | u32 allocated_dst; | 156 | u32 allocated_dst; |
@@ -237,6 +163,8 @@ struct d40_base; | |||
237 | * | 163 | * |
238 | * @lock: A spinlock to protect this struct. | 164 | * @lock: A spinlock to protect this struct. |
239 | * @log_num: The logical number, if any of this channel. | 165 | * @log_num: The logical number, if any of this channel. |
166 | * @completed: Starts with 1, after first interrupt it is set to dma engine's | ||
167 | * current cookie. | ||
240 | * @pending_tx: The number of pending transfers. Used between interrupt handler | 168 | * @pending_tx: The number of pending transfers. Used between interrupt handler |
241 | * and tasklet. | 169 | * and tasklet. |
242 | * @busy: Set to true when transfer is ongoing on this channel. | 170 | * @busy: Set to true when transfer is ongoing on this channel. |
@@ -256,6 +184,7 @@ struct d40_base; | |||
256 | * @src_def_cfg: Default cfg register setting for src. | 184 | * @src_def_cfg: Default cfg register setting for src. |
257 | * @dst_def_cfg: Default cfg register setting for dst. | 185 | * @dst_def_cfg: Default cfg register setting for dst. |
258 | * @log_def: Default logical channel settings. | 186 | * @log_def: Default logical channel settings. |
187 | * @lcla: Space for one dst src pair for logical channel transfers. | ||
259 | * @lcpa: Pointer to dst and src lcpa settings. | 188 | * @lcpa: Pointer to dst and src lcpa settings. |
260 | * @runtime_addr: runtime configured address. | 189 | * @runtime_addr: runtime configured address. |
261 | * @runtime_direction: runtime configured direction. | 190 | * @runtime_direction: runtime configured direction. |
@@ -265,6 +194,8 @@ struct d40_base; | |||
265 | struct d40_chan { | 194 | struct d40_chan { |
266 | spinlock_t lock; | 195 | spinlock_t lock; |
267 | int log_num; | 196 | int log_num; |
197 | /* ID of the most recent completed transfer */ | ||
198 | int completed; | ||
268 | int pending_tx; | 199 | int pending_tx; |
269 | bool busy; | 200 | bool busy; |
270 | struct d40_phy_res *phy_chan; | 201 | struct d40_phy_res *phy_chan; |
@@ -285,7 +216,7 @@ struct d40_chan { | |||
285 | struct d40_log_lli_full *lcpa; | 216 | struct d40_log_lli_full *lcpa; |
286 | /* Runtime reconfiguration */ | 217 | /* Runtime reconfiguration */ |
287 | dma_addr_t runtime_addr; | 218 | dma_addr_t runtime_addr; |
288 | enum dma_transfer_direction runtime_direction; | 219 | enum dma_data_direction runtime_direction; |
289 | }; | 220 | }; |
290 | 221 | ||
291 | /** | 222 | /** |
@@ -309,7 +240,6 @@ struct d40_chan { | |||
309 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 240 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
310 | * @dma_slave: dma_device channels that can do only do slave transfers. | 241 | * @dma_slave: dma_device channels that can do only do slave transfers. |
311 | * @dma_memcpy: dma_device channels that can do only do memcpy transfers. | 242 | * @dma_memcpy: dma_device channels that can do only do memcpy transfers. |
312 | * @phy_chans: Room for all possible physical channels in system. | ||
313 | * @log_chans: Room for all possible logical channels in system. | 243 | * @log_chans: Room for all possible logical channels in system. |
314 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 244 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
315 | * to log_chans entries. | 245 | * to log_chans entries. |
@@ -317,20 +247,12 @@ struct d40_chan { | |||
317 | * to phy_chans entries. | 247 | * to phy_chans entries. |
318 | * @plat_data: Pointer to provided platform_data which is the driver | 248 | * @plat_data: Pointer to provided platform_data which is the driver |
319 | * configuration. | 249 | * configuration. |
320 | * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla. | ||
321 | * @phy_res: Vector containing all physical channels. | 250 | * @phy_res: Vector containing all physical channels. |
322 | * @lcla_pool: lcla pool settings and data. | 251 | * @lcla_pool: lcla pool settings and data. |
323 | * @lcpa_base: The virtual mapped address of LCPA. | 252 | * @lcpa_base: The virtual mapped address of LCPA. |
324 | * @phy_lcpa: The physical address of the LCPA. | 253 | * @phy_lcpa: The physical address of the LCPA. |
325 | * @lcpa_size: The size of the LCPA area. | 254 | * @lcpa_size: The size of the LCPA area. |
326 | * @desc_slab: cache for descriptors. | 255 | * @desc_slab: cache for descriptors. |
327 | * @reg_val_backup: Here the values of some hardware registers are stored | ||
328 | * before the DMA is powered off. They are restored when the power is back on. | ||
329 | * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and | ||
330 | * later. | ||
331 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. | ||
332 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. | ||
333 | * @initialized: true if the dma has been initialized | ||
334 | */ | 256 | */ |
335 | struct d40_base { | 257 | struct d40_base { |
336 | spinlock_t interrupt_lock; | 258 | spinlock_t interrupt_lock; |
@@ -352,7 +274,6 @@ struct d40_base { | |||
352 | struct d40_chan **lookup_log_chans; | 274 | struct d40_chan **lookup_log_chans; |
353 | struct d40_chan **lookup_phy_chans; | 275 | struct d40_chan **lookup_phy_chans; |
354 | struct stedma40_platform_data *plat_data; | 276 | struct stedma40_platform_data *plat_data; |
355 | struct regulator *lcpa_regulator; | ||
356 | /* Physical half channels */ | 277 | /* Physical half channels */ |
357 | struct d40_phy_res *phy_res; | 278 | struct d40_phy_res *phy_res; |
358 | struct d40_lcla_pool lcla_pool; | 279 | struct d40_lcla_pool lcla_pool; |
@@ -360,11 +281,6 @@ struct d40_base { | |||
360 | dma_addr_t phy_lcpa; | 281 | dma_addr_t phy_lcpa; |
361 | resource_size_t lcpa_size; | 282 | resource_size_t lcpa_size; |
362 | struct kmem_cache *desc_slab; | 283 | struct kmem_cache *desc_slab; |
363 | u32 reg_val_backup[BACKUP_REGS_SZ]; | ||
364 | u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; | ||
365 | u32 *reg_val_backup_chan; | ||
366 | u16 gcc_pwr_off_mask; | ||
367 | bool initialized; | ||
368 | }; | 284 | }; |
369 | 285 | ||
370 | /** | 286 | /** |
@@ -562,14 +478,13 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
562 | struct d40_desc *d; | 478 | struct d40_desc *d; |
563 | struct d40_desc *_d; | 479 | struct d40_desc *_d; |
564 | 480 | ||
565 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 481 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
566 | if (async_tx_test_ack(&d->txd)) { | 482 | if (async_tx_test_ack(&d->txd)) { |
567 | d40_desc_remove(d); | 483 | d40_desc_remove(d); |
568 | desc = d; | 484 | desc = d; |
569 | memset(desc, 0, sizeof(*desc)); | 485 | memset(desc, 0, sizeof(*desc)); |
570 | break; | 486 | break; |
571 | } | 487 | } |
572 | } | ||
573 | } | 488 | } |
574 | 489 | ||
575 | if (!desc) | 490 | if (!desc) |
@@ -620,7 +535,6 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
620 | bool cyclic = desc->cyclic; | 535 | bool cyclic = desc->cyclic; |
621 | int curr_lcla = -EINVAL; | 536 | int curr_lcla = -EINVAL; |
622 | int first_lcla = 0; | 537 | int first_lcla = 0; |
623 | bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; | ||
624 | bool linkback; | 538 | bool linkback; |
625 | 539 | ||
626 | /* | 540 | /* |
@@ -693,16 +607,11 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
693 | &lli->src[lli_current], | 607 | &lli->src[lli_current], |
694 | next_lcla, flags); | 608 | next_lcla, flags); |
695 | 609 | ||
696 | /* | 610 | dma_sync_single_range_for_device(chan->base->dev, |
697 | * Cache maintenance is not needed if lcla is | 611 | pool->dma_addr, lcla_offset, |
698 | * mapped in esram | 612 | 2 * sizeof(struct d40_log_lli), |
699 | */ | 613 | DMA_TO_DEVICE); |
700 | if (!use_esram_lcla) { | 614 | |
701 | dma_sync_single_range_for_device(chan->base->dev, | ||
702 | pool->dma_addr, lcla_offset, | ||
703 | 2 * sizeof(struct d40_log_lli), | ||
704 | DMA_TO_DEVICE); | ||
705 | } | ||
706 | curr_lcla = next_lcla; | 615 | curr_lcla = next_lcla; |
707 | 616 | ||
708 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | 617 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { |
@@ -830,64 +739,10 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | |||
830 | return len; | 739 | return len; |
831 | } | 740 | } |
832 | 741 | ||
742 | /* Support functions for logical channels */ | ||
833 | 743 | ||
834 | #ifdef CONFIG_PM | 744 | static int d40_channel_execute_command(struct d40_chan *d40c, |
835 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | 745 | enum d40_command command) |
836 | u32 *regaddr, int num, bool save) | ||
837 | { | ||
838 | int i; | ||
839 | |||
840 | for (i = 0; i < num; i++) { | ||
841 | void __iomem *addr = baseaddr + regaddr[i]; | ||
842 | |||
843 | if (save) | ||
844 | backup[i] = readl_relaxed(addr); | ||
845 | else | ||
846 | writel_relaxed(backup[i], addr); | ||
847 | } | ||
848 | } | ||
849 | |||
850 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
851 | { | ||
852 | int i; | ||
853 | |||
854 | /* Save/Restore channel specific registers */ | ||
855 | for (i = 0; i < base->num_phy_chans; i++) { | ||
856 | void __iomem *addr; | ||
857 | int idx; | ||
858 | |||
859 | if (base->phy_res[i].reserved) | ||
860 | continue; | ||
861 | |||
862 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | ||
863 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); | ||
864 | |||
865 | dma40_backup(addr, &base->reg_val_backup_chan[idx], | ||
866 | d40_backup_regs_chan, | ||
867 | ARRAY_SIZE(d40_backup_regs_chan), | ||
868 | save); | ||
869 | } | ||
870 | |||
871 | /* Save/Restore global registers */ | ||
872 | dma40_backup(base->virtbase, base->reg_val_backup, | ||
873 | d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | ||
874 | save); | ||
875 | |||
876 | /* Save/Restore registers only existing on dma40 v3 and later */ | ||
877 | if (base->rev >= 3) | ||
878 | dma40_backup(base->virtbase, base->reg_val_backup_v3, | ||
879 | d40_backup_regs_v3, | ||
880 | ARRAY_SIZE(d40_backup_regs_v3), | ||
881 | save); | ||
882 | } | ||
883 | #else | ||
884 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
885 | { | ||
886 | } | ||
887 | #endif | ||
888 | |||
889 | static int __d40_execute_command_phy(struct d40_chan *d40c, | ||
890 | enum d40_command command) | ||
891 | { | 746 | { |
892 | u32 status; | 747 | u32 status; |
893 | int i; | 748 | int i; |
@@ -896,12 +751,6 @@ static int __d40_execute_command_phy(struct d40_chan *d40c, | |||
896 | unsigned long flags; | 751 | unsigned long flags; |
897 | u32 wmask; | 752 | u32 wmask; |
898 | 753 | ||
899 | if (command == D40_DMA_STOP) { | ||
900 | ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); | ||
901 | if (ret) | ||
902 | return ret; | ||
903 | } | ||
904 | |||
905 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); | 754 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); |
906 | 755 | ||
907 | if (d40c->phy_chan->num % 2 == 0) | 756 | if (d40c->phy_chan->num % 2 == 0) |
@@ -995,109 +844,67 @@ static void d40_term_all(struct d40_chan *d40c) | |||
995 | } | 844 | } |
996 | 845 | ||
997 | d40c->pending_tx = 0; | 846 | d40c->pending_tx = 0; |
847 | d40c->busy = false; | ||
998 | } | 848 | } |
999 | 849 | ||
1000 | static void __d40_config_set_event(struct d40_chan *d40c, | 850 | static void __d40_config_set_event(struct d40_chan *d40c, bool enable, |
1001 | enum d40_events event_type, u32 event, | 851 | u32 event, int reg) |
1002 | int reg) | ||
1003 | { | 852 | { |
1004 | void __iomem *addr = chan_base(d40c) + reg; | 853 | void __iomem *addr = chan_base(d40c) + reg; |
1005 | int tries; | 854 | int tries; |
1006 | u32 status; | ||
1007 | |||
1008 | switch (event_type) { | ||
1009 | |||
1010 | case D40_DEACTIVATE_EVENTLINE: | ||
1011 | 855 | ||
856 | if (!enable) { | ||
1012 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | 857 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) |
1013 | | ~D40_EVENTLINE_MASK(event), addr); | 858 | | ~D40_EVENTLINE_MASK(event), addr); |
1014 | break; | 859 | return; |
1015 | 860 | } | |
1016 | case D40_SUSPEND_REQ_EVENTLINE: | ||
1017 | status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> | ||
1018 | D40_EVENTLINE_POS(event); | ||
1019 | |||
1020 | if (status == D40_DEACTIVATE_EVENTLINE || | ||
1021 | status == D40_SUSPEND_REQ_EVENTLINE) | ||
1022 | break; | ||
1023 | |||
1024 | writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
1025 | | ~D40_EVENTLINE_MASK(event), addr); | ||
1026 | |||
1027 | for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { | ||
1028 | |||
1029 | status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> | ||
1030 | D40_EVENTLINE_POS(event); | ||
1031 | |||
1032 | cpu_relax(); | ||
1033 | /* | ||
1034 | * Reduce the number of bus accesses while | ||
1035 | * waiting for the DMA to suspend. | ||
1036 | */ | ||
1037 | udelay(3); | ||
1038 | |||
1039 | if (status == D40_DEACTIVATE_EVENTLINE) | ||
1040 | break; | ||
1041 | } | ||
1042 | |||
1043 | if (tries == D40_SUSPEND_MAX_IT) { | ||
1044 | chan_err(d40c, | ||
1045 | "unable to stop the event_line chl %d (log: %d)" | ||
1046 | "status %x\n", d40c->phy_chan->num, | ||
1047 | d40c->log_num, status); | ||
1048 | } | ||
1049 | break; | ||
1050 | 861 | ||
1051 | case D40_ACTIVATE_EVENTLINE: | ||
1052 | /* | 862 | /* |
1053 | * The hardware sometimes doesn't register the enable when src and dst | 863 | * The hardware sometimes doesn't register the enable when src and dst |
1054 | * event lines are active on the same logical channel. Retry to ensure | 864 | * event lines are active on the same logical channel. Retry to ensure |
1055 | * it does. Usually only one retry is sufficient. | 865 | * it does. Usually only one retry is sufficient. |
1056 | */ | 866 | */ |
1057 | tries = 100; | 867 | tries = 100; |
1058 | while (--tries) { | 868 | while (--tries) { |
1059 | writel((D40_ACTIVATE_EVENTLINE << | 869 | writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) |
1060 | D40_EVENTLINE_POS(event)) | | 870 | | ~D40_EVENTLINE_MASK(event), addr); |
1061 | ~D40_EVENTLINE_MASK(event), addr); | ||
1062 | |||
1063 | if (readl(addr) & D40_EVENTLINE_MASK(event)) | ||
1064 | break; | ||
1065 | } | ||
1066 | |||
1067 | if (tries != 99) | ||
1068 | dev_dbg(chan2dev(d40c), | ||
1069 | "[%s] workaround enable S%cLNK (%d tries)\n", | ||
1070 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | ||
1071 | 100 - tries); | ||
1072 | 871 | ||
1073 | WARN_ON(!tries); | 872 | if (readl(addr) & D40_EVENTLINE_MASK(event)) |
1074 | break; | 873 | break; |
874 | } | ||
1075 | 875 | ||
1076 | case D40_ROUND_EVENTLINE: | 876 | if (tries != 99) |
1077 | BUG(); | 877 | dev_dbg(chan2dev(d40c), |
1078 | break; | 878 | "[%s] workaround enable S%cLNK (%d tries)\n", |
879 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | ||
880 | 100 - tries); | ||
1079 | 881 | ||
1080 | } | 882 | WARN_ON(!tries); |
1081 | } | 883 | } |
1082 | 884 | ||
1083 | static void d40_config_set_event(struct d40_chan *d40c, | 885 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) |
1084 | enum d40_events event_type) | ||
1085 | { | 886 | { |
887 | unsigned long flags; | ||
888 | |||
889 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | ||
890 | |||
1086 | /* Enable event line connected to device (or memcpy) */ | 891 | /* Enable event line connected to device (or memcpy) */ |
1087 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 892 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
1088 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 893 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { |
1089 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 894 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1090 | 895 | ||
1091 | __d40_config_set_event(d40c, event_type, event, | 896 | __d40_config_set_event(d40c, do_enable, event, |
1092 | D40_CHAN_REG_SSLNK); | 897 | D40_CHAN_REG_SSLNK); |
1093 | } | 898 | } |
1094 | 899 | ||
1095 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | 900 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { |
1096 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 901 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1097 | 902 | ||
1098 | __d40_config_set_event(d40c, event_type, event, | 903 | __d40_config_set_event(d40c, do_enable, event, |
1099 | D40_CHAN_REG_SDLNK); | 904 | D40_CHAN_REG_SDLNK); |
1100 | } | 905 | } |
906 | |||
907 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | ||
1101 | } | 908 | } |
1102 | 909 | ||
1103 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 910 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
@@ -1111,64 +918,6 @@ static u32 d40_chan_has_events(struct d40_chan *d40c) | |||
1111 | return val; | 918 | return val; |
1112 | } | 919 | } |
1113 | 920 | ||
1114 | static int | ||
1115 | __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) | ||
1116 | { | ||
1117 | unsigned long flags; | ||
1118 | int ret = 0; | ||
1119 | u32 active_status; | ||
1120 | void __iomem *active_reg; | ||
1121 | |||
1122 | if (d40c->phy_chan->num % 2 == 0) | ||
1123 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | ||
1124 | else | ||
1125 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | ||
1126 | |||
1127 | |||
1128 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | ||
1129 | |||
1130 | switch (command) { | ||
1131 | case D40_DMA_STOP: | ||
1132 | case D40_DMA_SUSPEND_REQ: | ||
1133 | |||
1134 | active_status = (readl(active_reg) & | ||
1135 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | ||
1136 | D40_CHAN_POS(d40c->phy_chan->num); | ||
1137 | |||
1138 | if (active_status == D40_DMA_RUN) | ||
1139 | d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); | ||
1140 | else | ||
1141 | d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); | ||
1142 | |||
1143 | if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) | ||
1144 | ret = __d40_execute_command_phy(d40c, command); | ||
1145 | |||
1146 | break; | ||
1147 | |||
1148 | case D40_DMA_RUN: | ||
1149 | |||
1150 | d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); | ||
1151 | ret = __d40_execute_command_phy(d40c, command); | ||
1152 | break; | ||
1153 | |||
1154 | case D40_DMA_SUSPENDED: | ||
1155 | BUG(); | ||
1156 | break; | ||
1157 | } | ||
1158 | |||
1159 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | ||
1160 | return ret; | ||
1161 | } | ||
1162 | |||
1163 | static int d40_channel_execute_command(struct d40_chan *d40c, | ||
1164 | enum d40_command command) | ||
1165 | { | ||
1166 | if (chan_is_logical(d40c)) | ||
1167 | return __d40_execute_command_log(d40c, command); | ||
1168 | else | ||
1169 | return __d40_execute_command_phy(d40c, command); | ||
1170 | } | ||
1171 | |||
1172 | static u32 d40_get_prmo(struct d40_chan *d40c) | 921 | static u32 d40_get_prmo(struct d40_chan *d40c) |
1173 | { | 922 | { |
1174 | static const unsigned int phy_map[] = { | 923 | static const unsigned int phy_map[] = { |
@@ -1223,10 +972,6 @@ static void d40_config_write(struct d40_chan *d40c) | |||
1223 | /* Set LIDX for lcla */ | 972 | /* Set LIDX for lcla */ |
1224 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); | 973 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); |
1225 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); | 974 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); |
1226 | |||
1227 | /* Clear LNK which will be used by d40_chan_has_events() */ | ||
1228 | writel(0, chanbase + D40_CHAN_REG_SSLNK); | ||
1229 | writel(0, chanbase + D40_CHAN_REG_SDLNK); | ||
1230 | } | 975 | } |
1231 | } | 976 | } |
1232 | 977 | ||
@@ -1267,13 +1012,19 @@ static int d40_pause(struct d40_chan *d40c) | |||
1267 | if (!d40c->busy) | 1012 | if (!d40c->busy) |
1268 | return 0; | 1013 | return 0; |
1269 | 1014 | ||
1270 | pm_runtime_get_sync(d40c->base->dev); | ||
1271 | spin_lock_irqsave(&d40c->lock, flags); | 1015 | spin_lock_irqsave(&d40c->lock, flags); |
1272 | 1016 | ||
1273 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1017 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1018 | if (res == 0) { | ||
1019 | if (chan_is_logical(d40c)) { | ||
1020 | d40_config_set_event(d40c, false); | ||
1021 | /* Resume the other logical channels if any */ | ||
1022 | if (d40_chan_has_events(d40c)) | ||
1023 | res = d40_channel_execute_command(d40c, | ||
1024 | D40_DMA_RUN); | ||
1025 | } | ||
1026 | } | ||
1274 | 1027 | ||
1275 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1276 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1277 | spin_unlock_irqrestore(&d40c->lock, flags); | 1028 | spin_unlock_irqrestore(&d40c->lock, flags); |
1278 | return res; | 1029 | return res; |
1279 | } | 1030 | } |
@@ -1287,18 +1038,44 @@ static int d40_resume(struct d40_chan *d40c) | |||
1287 | return 0; | 1038 | return 0; |
1288 | 1039 | ||
1289 | spin_lock_irqsave(&d40c->lock, flags); | 1040 | spin_lock_irqsave(&d40c->lock, flags); |
1290 | pm_runtime_get_sync(d40c->base->dev); | 1041 | |
1042 | if (d40c->base->rev == 0) | ||
1043 | if (chan_is_logical(d40c)) { | ||
1044 | res = d40_channel_execute_command(d40c, | ||
1045 | D40_DMA_SUSPEND_REQ); | ||
1046 | goto no_suspend; | ||
1047 | } | ||
1291 | 1048 | ||
1292 | /* If bytes left to transfer or linked tx resume job */ | 1049 | /* If bytes left to transfer or linked tx resume job */ |
1293 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) | 1050 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { |
1051 | |||
1052 | if (chan_is_logical(d40c)) | ||
1053 | d40_config_set_event(d40c, true); | ||
1054 | |||
1294 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1055 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
1056 | } | ||
1295 | 1057 | ||
1296 | pm_runtime_mark_last_busy(d40c->base->dev); | 1058 | no_suspend: |
1297 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1298 | spin_unlock_irqrestore(&d40c->lock, flags); | 1059 | spin_unlock_irqrestore(&d40c->lock, flags); |
1299 | return res; | 1060 | return res; |
1300 | } | 1061 | } |
1301 | 1062 | ||
1063 | static int d40_terminate_all(struct d40_chan *chan) | ||
1064 | { | ||
1065 | unsigned long flags; | ||
1066 | int ret = 0; | ||
1067 | |||
1068 | ret = d40_pause(chan); | ||
1069 | if (!ret && chan_is_physical(chan)) | ||
1070 | ret = d40_channel_execute_command(chan, D40_DMA_STOP); | ||
1071 | |||
1072 | spin_lock_irqsave(&chan->lock, flags); | ||
1073 | d40_term_all(chan); | ||
1074 | spin_unlock_irqrestore(&chan->lock, flags); | ||
1075 | |||
1076 | return ret; | ||
1077 | } | ||
1078 | |||
1302 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 1079 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
1303 | { | 1080 | { |
1304 | struct d40_chan *d40c = container_of(tx->chan, | 1081 | struct d40_chan *d40c = container_of(tx->chan, |
@@ -1306,18 +1083,39 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
1306 | chan); | 1083 | chan); |
1307 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 1084 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
1308 | unsigned long flags; | 1085 | unsigned long flags; |
1309 | dma_cookie_t cookie; | ||
1310 | 1086 | ||
1311 | spin_lock_irqsave(&d40c->lock, flags); | 1087 | spin_lock_irqsave(&d40c->lock, flags); |
1312 | cookie = dma_cookie_assign(tx); | 1088 | |
1089 | d40c->chan.cookie++; | ||
1090 | |||
1091 | if (d40c->chan.cookie < 0) | ||
1092 | d40c->chan.cookie = 1; | ||
1093 | |||
1094 | d40d->txd.cookie = d40c->chan.cookie; | ||
1095 | |||
1313 | d40_desc_queue(d40c, d40d); | 1096 | d40_desc_queue(d40c, d40d); |
1097 | |||
1314 | spin_unlock_irqrestore(&d40c->lock, flags); | 1098 | spin_unlock_irqrestore(&d40c->lock, flags); |
1315 | 1099 | ||
1316 | return cookie; | 1100 | return tx->cookie; |
1317 | } | 1101 | } |
1318 | 1102 | ||
1319 | static int d40_start(struct d40_chan *d40c) | 1103 | static int d40_start(struct d40_chan *d40c) |
1320 | { | 1104 | { |
1105 | if (d40c->base->rev == 0) { | ||
1106 | int err; | ||
1107 | |||
1108 | if (chan_is_logical(d40c)) { | ||
1109 | err = d40_channel_execute_command(d40c, | ||
1110 | D40_DMA_SUSPEND_REQ); | ||
1111 | if (err) | ||
1112 | return err; | ||
1113 | } | ||
1114 | } | ||
1115 | |||
1116 | if (chan_is_logical(d40c)) | ||
1117 | d40_config_set_event(d40c, true); | ||
1118 | |||
1321 | return d40_channel_execute_command(d40c, D40_DMA_RUN); | 1119 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
1322 | } | 1120 | } |
1323 | 1121 | ||
@@ -1330,10 +1128,7 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
1330 | d40d = d40_first_queued(d40c); | 1128 | d40d = d40_first_queued(d40c); |
1331 | 1129 | ||
1332 | if (d40d != NULL) { | 1130 | if (d40d != NULL) { |
1333 | if (!d40c->busy) { | 1131 | d40c->busy = true; |
1334 | d40c->busy = true; | ||
1335 | pm_runtime_get_sync(d40c->base->dev); | ||
1336 | } | ||
1337 | 1132 | ||
1338 | /* Remove from queue */ | 1133 | /* Remove from queue */ |
1339 | d40_desc_remove(d40d); | 1134 | d40_desc_remove(d40d); |
@@ -1394,8 +1189,6 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
1394 | 1189 | ||
1395 | if (d40_queue_start(d40c) == NULL) | 1190 | if (d40_queue_start(d40c) == NULL) |
1396 | d40c->busy = false; | 1191 | d40c->busy = false; |
1397 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1398 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1399 | } | 1192 | } |
1400 | 1193 | ||
1401 | d40c->pending_tx++; | 1194 | d40c->pending_tx++; |
@@ -1419,7 +1212,7 @@ static void dma_tasklet(unsigned long data) | |||
1419 | goto err; | 1212 | goto err; |
1420 | 1213 | ||
1421 | if (!d40d->cyclic) | 1214 | if (!d40d->cyclic) |
1422 | dma_cookie_complete(&d40d->txd); | 1215 | d40c->completed = d40d->txd.cookie; |
1423 | 1216 | ||
1424 | /* | 1217 | /* |
1425 | * If terminating a channel pending_tx is set to zero. | 1218 | * If terminating a channel pending_tx is set to zero. |
@@ -1460,8 +1253,8 @@ static void dma_tasklet(unsigned long data) | |||
1460 | 1253 | ||
1461 | return; | 1254 | return; |
1462 | 1255 | ||
1463 | err: | 1256 | err: |
1464 | /* Rescue manouver if receiving double interrupts */ | 1257 | /* Rescue manoeuvre if receiving double interrupts */ |
1465 | if (d40c->pending_tx > 0) | 1258 | if (d40c->pending_tx > 0) |
1466 | d40c->pending_tx--; | 1259 | d40c->pending_tx--; |
1467 | spin_unlock_irqrestore(&d40c->lock, flags); | 1260 | spin_unlock_irqrestore(&d40c->lock, flags); |
@@ -1611,16 +1404,11 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1611 | return res; | 1404 | return res; |
1612 | } | 1405 | } |
1613 | 1406 | ||
1614 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, | 1407 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, |
1615 | bool is_src, int log_event_line, bool is_log, | 1408 | int log_event_line, bool is_log) |
1616 | bool *first_user) | ||
1617 | { | 1409 | { |
1618 | unsigned long flags; | 1410 | unsigned long flags; |
1619 | spin_lock_irqsave(&phy->lock, flags); | 1411 | spin_lock_irqsave(&phy->lock, flags); |
1620 | |||
1621 | *first_user = ((phy->allocated_src | phy->allocated_dst) | ||
1622 | == D40_ALLOC_FREE); | ||
1623 | |||
1624 | if (!is_log) { | 1412 | if (!is_log) { |
1625 | /* Physical interrupts are masked per physical full channel */ | 1413 | /* Physical interrupts are masked per physical full channel */ |
1626 | if (phy->allocated_src == D40_ALLOC_FREE && | 1414 | if (phy->allocated_src == D40_ALLOC_FREE && |
@@ -1701,7 +1489,7 @@ out: | |||
1701 | return is_free; | 1489 | return is_free; |
1702 | } | 1490 | } |
1703 | 1491 | ||
1704 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | 1492 | static int d40_allocate_channel(struct d40_chan *d40c) |
1705 | { | 1493 | { |
1706 | int dev_type; | 1494 | int dev_type; |
1707 | int event_group; | 1495 | int event_group; |
@@ -1737,8 +1525,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
1737 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | 1525 | for (i = 0; i < d40c->base->num_phy_chans; i++) { |
1738 | 1526 | ||
1739 | if (d40_alloc_mask_set(&phys[i], is_src, | 1527 | if (d40_alloc_mask_set(&phys[i], is_src, |
1740 | 0, is_log, | 1528 | 0, is_log)) |
1741 | first_phy_user)) | ||
1742 | goto found_phy; | 1529 | goto found_phy; |
1743 | } | 1530 | } |
1744 | } else | 1531 | } else |
@@ -1748,8 +1535,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
1748 | if (d40_alloc_mask_set(&phys[i], | 1535 | if (d40_alloc_mask_set(&phys[i], |
1749 | is_src, | 1536 | is_src, |
1750 | 0, | 1537 | 0, |
1751 | is_log, | 1538 | is_log)) |
1752 | first_phy_user)) | ||
1753 | goto found_phy; | 1539 | goto found_phy; |
1754 | } | 1540 | } |
1755 | } | 1541 | } |
@@ -1765,25 +1551,6 @@ found_phy: | |||
1765 | /* Find logical channel */ | 1551 | /* Find logical channel */ |
1766 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1552 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1767 | int phy_num = j + event_group * 2; | 1553 | int phy_num = j + event_group * 2; |
1768 | |||
1769 | if (d40c->dma_cfg.use_fixed_channel) { | ||
1770 | i = d40c->dma_cfg.phy_channel; | ||
1771 | |||
1772 | if ((i != phy_num) && (i != phy_num + 1)) { | ||
1773 | dev_err(chan2dev(d40c), | ||
1774 | "invalid fixed phy channel %d\n", i); | ||
1775 | return -EINVAL; | ||
1776 | } | ||
1777 | |||
1778 | if (d40_alloc_mask_set(&phys[i], is_src, event_line, | ||
1779 | is_log, first_phy_user)) | ||
1780 | goto found_log; | ||
1781 | |||
1782 | dev_err(chan2dev(d40c), | ||
1783 | "could not allocate fixed phy channel %d\n", i); | ||
1784 | return -EINVAL; | ||
1785 | } | ||
1786 | |||
1787 | /* | 1554 | /* |
1788 | * Spread logical channels across all available physical rather | 1555 | * Spread logical channels across all available physical rather |
1789 | * than pack every logical channel at the first available phy | 1556 | * than pack every logical channel at the first available phy |
@@ -1792,15 +1559,13 @@ found_phy: | |||
1792 | if (is_src) { | 1559 | if (is_src) { |
1793 | for (i = phy_num; i < phy_num + 2; i++) { | 1560 | for (i = phy_num; i < phy_num + 2; i++) { |
1794 | if (d40_alloc_mask_set(&phys[i], is_src, | 1561 | if (d40_alloc_mask_set(&phys[i], is_src, |
1795 | event_line, is_log, | 1562 | event_line, is_log)) |
1796 | first_phy_user)) | ||
1797 | goto found_log; | 1563 | goto found_log; |
1798 | } | 1564 | } |
1799 | } else { | 1565 | } else { |
1800 | for (i = phy_num + 1; i >= phy_num; i--) { | 1566 | for (i = phy_num + 1; i >= phy_num; i--) { |
1801 | if (d40_alloc_mask_set(&phys[i], is_src, | 1567 | if (d40_alloc_mask_set(&phys[i], is_src, |
1802 | event_line, is_log, | 1568 | event_line, is_log)) |
1803 | first_phy_user)) | ||
1804 | goto found_log; | 1569 | goto found_log; |
1805 | } | 1570 | } |
1806 | } | 1571 | } |
@@ -1842,6 +1607,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) | |||
1842 | return 0; | 1607 | return 0; |
1843 | } | 1608 | } |
1844 | 1609 | ||
1610 | |||
1845 | static int d40_free_dma(struct d40_chan *d40c) | 1611 | static int d40_free_dma(struct d40_chan *d40c) |
1846 | { | 1612 | { |
1847 | 1613 | ||
@@ -1876,33 +1642,50 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1876 | return -EINVAL; | 1642 | return -EINVAL; |
1877 | } | 1643 | } |
1878 | 1644 | ||
1879 | pm_runtime_get_sync(d40c->base->dev); | 1645 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1880 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | ||
1881 | if (res) { | 1646 | if (res) { |
1882 | chan_err(d40c, "stop failed\n"); | 1647 | chan_err(d40c, "suspend failed\n"); |
1883 | goto out; | 1648 | return res; |
1884 | } | 1649 | } |
1885 | 1650 | ||
1886 | d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); | 1651 | if (chan_is_logical(d40c)) { |
1652 | /* Release logical channel, deactivate the event line */ | ||
1887 | 1653 | ||
1888 | if (chan_is_logical(d40c)) | 1654 | d40_config_set_event(d40c, false); |
1889 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; | 1655 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
1890 | else | ||
1891 | d40c->base->lookup_phy_chans[phy->num] = NULL; | ||
1892 | 1656 | ||
1893 | if (d40c->busy) { | 1657 | /* |
1894 | pm_runtime_mark_last_busy(d40c->base->dev); | 1658 | * Check if there are more logical allocation |
1895 | pm_runtime_put_autosuspend(d40c->base->dev); | 1659 | * on this phy channel. |
1660 | */ | ||
1661 | if (!d40_alloc_mask_free(phy, is_src, event)) { | ||
1662 | /* Resume the other logical channels if any */ | ||
1663 | if (d40_chan_has_events(d40c)) { | ||
1664 | res = d40_channel_execute_command(d40c, | ||
1665 | D40_DMA_RUN); | ||
1666 | if (res) { | ||
1667 | chan_err(d40c, | ||
1668 | "Executing RUN command\n"); | ||
1669 | return res; | ||
1670 | } | ||
1671 | } | ||
1672 | return 0; | ||
1673 | } | ||
1674 | } else { | ||
1675 | (void) d40_alloc_mask_free(phy, is_src, 0); | ||
1896 | } | 1676 | } |
1897 | 1677 | ||
1898 | d40c->busy = false; | 1678 | /* Release physical channel */ |
1679 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | ||
1680 | if (res) { | ||
1681 | chan_err(d40c, "Failed to stop channel\n"); | ||
1682 | return res; | ||
1683 | } | ||
1899 | d40c->phy_chan = NULL; | 1684 | d40c->phy_chan = NULL; |
1900 | d40c->configured = false; | 1685 | d40c->configured = false; |
1901 | out: | 1686 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
1902 | 1687 | ||
1903 | pm_runtime_mark_last_busy(d40c->base->dev); | 1688 | return 0; |
1904 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1905 | return res; | ||
1906 | } | 1689 | } |
1907 | 1690 | ||
1908 | static bool d40_is_paused(struct d40_chan *d40c) | 1691 | static bool d40_is_paused(struct d40_chan *d40c) |
@@ -2071,7 +1854,7 @@ err: | |||
2071 | } | 1854 | } |
2072 | 1855 | ||
2073 | static dma_addr_t | 1856 | static dma_addr_t |
2074 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) | 1857 | d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) |
2075 | { | 1858 | { |
2076 | struct stedma40_platform_data *plat = chan->base->plat_data; | 1859 | struct stedma40_platform_data *plat = chan->base->plat_data; |
2077 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 1860 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
@@ -2080,9 +1863,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) | |||
2080 | if (chan->runtime_addr) | 1863 | if (chan->runtime_addr) |
2081 | return chan->runtime_addr; | 1864 | return chan->runtime_addr; |
2082 | 1865 | ||
2083 | if (direction == DMA_DEV_TO_MEM) | 1866 | if (direction == DMA_FROM_DEVICE) |
2084 | addr = plat->dev_rx[cfg->src_dev_type]; | 1867 | addr = plat->dev_rx[cfg->src_dev_type]; |
2085 | else if (direction == DMA_MEM_TO_DEV) | 1868 | else if (direction == DMA_TO_DEVICE) |
2086 | addr = plat->dev_tx[cfg->dst_dev_type]; | 1869 | addr = plat->dev_tx[cfg->dst_dev_type]; |
2087 | 1870 | ||
2088 | return addr; | 1871 | return addr; |
@@ -2091,7 +1874,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) | |||
2091 | static struct dma_async_tx_descriptor * | 1874 | static struct dma_async_tx_descriptor * |
2092 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | 1875 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
2093 | struct scatterlist *sg_dst, unsigned int sg_len, | 1876 | struct scatterlist *sg_dst, unsigned int sg_len, |
2094 | enum dma_transfer_direction direction, unsigned long dma_flags) | 1877 | enum dma_data_direction direction, unsigned long dma_flags) |
2095 | { | 1878 | { |
2096 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | 1879 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); |
2097 | dma_addr_t src_dev_addr = 0; | 1880 | dma_addr_t src_dev_addr = 0; |
@@ -2115,12 +1898,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
2115 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) | 1898 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
2116 | desc->cyclic = true; | 1899 | desc->cyclic = true; |
2117 | 1900 | ||
2118 | if (direction != DMA_TRANS_NONE) { | 1901 | if (direction != DMA_NONE) { |
2119 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | 1902 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); |
2120 | 1903 | ||
2121 | if (direction == DMA_DEV_TO_MEM) | 1904 | if (direction == DMA_FROM_DEVICE) |
2122 | src_dev_addr = dev_addr; | 1905 | src_dev_addr = dev_addr; |
2123 | else if (direction == DMA_MEM_TO_DEV) | 1906 | else if (direction == DMA_TO_DEVICE) |
2124 | dst_dev_addr = dev_addr; | 1907 | dst_dev_addr = dev_addr; |
2125 | } | 1908 | } |
2126 | 1909 | ||
@@ -2217,7 +2000,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2217 | bool is_free_phy; | 2000 | bool is_free_phy; |
2218 | spin_lock_irqsave(&d40c->lock, flags); | 2001 | spin_lock_irqsave(&d40c->lock, flags); |
2219 | 2002 | ||
2220 | dma_cookie_init(chan); | 2003 | d40c->completed = chan->cookie = 1; |
2221 | 2004 | ||
2222 | /* If no dma configuration is set use default configuration (memcpy) */ | 2005 | /* If no dma configuration is set use default configuration (memcpy) */ |
2223 | if (!d40c->configured) { | 2006 | if (!d40c->configured) { |
@@ -2227,15 +2010,14 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2227 | goto fail; | 2010 | goto fail; |
2228 | } | 2011 | } |
2229 | } | 2012 | } |
2013 | is_free_phy = (d40c->phy_chan == NULL); | ||
2230 | 2014 | ||
2231 | err = d40_allocate_channel(d40c, &is_free_phy); | 2015 | err = d40_allocate_channel(d40c); |
2232 | if (err) { | 2016 | if (err) { |
2233 | chan_err(d40c, "Failed to allocate channel\n"); | 2017 | chan_err(d40c, "Failed to allocate channel\n"); |
2234 | d40c->configured = false; | ||
2235 | goto fail; | 2018 | goto fail; |
2236 | } | 2019 | } |
2237 | 2020 | ||
2238 | pm_runtime_get_sync(d40c->base->dev); | ||
2239 | /* Fill in basic CFG register values */ | 2021 | /* Fill in basic CFG register values */ |
2240 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 2022 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
2241 | &d40c->dst_def_cfg, chan_is_logical(d40c)); | 2023 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
@@ -2255,12 +2037,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2255 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 2037 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2256 | } | 2038 | } |
2257 | 2039 | ||
2258 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | ||
2259 | chan_is_logical(d40c) ? "logical" : "physical", | ||
2260 | d40c->phy_chan->num, | ||
2261 | d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); | ||
2262 | |||
2263 | |||
2264 | /* | 2040 | /* |
2265 | * Only write channel configuration to the DMA if the physical | 2041 | * Only write channel configuration to the DMA if the physical |
2266 | * resource is free. In case of multiple logical channels | 2042 | * resource is free. In case of multiple logical channels |
@@ -2269,8 +2045,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2269 | if (is_free_phy) | 2045 | if (is_free_phy) |
2270 | d40_config_write(d40c); | 2046 | d40_config_write(d40c); |
2271 | fail: | 2047 | fail: |
2272 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
2273 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
2274 | spin_unlock_irqrestore(&d40c->lock, flags); | 2048 | spin_unlock_irqrestore(&d40c->lock, flags); |
2275 | return err; | 2049 | return err; |
2276 | } | 2050 | } |
@@ -2333,11 +2107,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan, | |||
2333 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2107 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
2334 | struct scatterlist *sgl, | 2108 | struct scatterlist *sgl, |
2335 | unsigned int sg_len, | 2109 | unsigned int sg_len, |
2336 | enum dma_transfer_direction direction, | 2110 | enum dma_data_direction direction, |
2337 | unsigned long dma_flags, | 2111 | unsigned long dma_flags) |
2338 | void *context) | ||
2339 | { | 2112 | { |
2340 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) | 2113 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) |
2341 | return NULL; | 2114 | return NULL; |
2342 | 2115 | ||
2343 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); | 2116 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
@@ -2346,8 +2119,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2346 | static struct dma_async_tx_descriptor * | 2119 | static struct dma_async_tx_descriptor * |
2347 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 2120 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2348 | size_t buf_len, size_t period_len, | 2121 | size_t buf_len, size_t period_len, |
2349 | enum dma_transfer_direction direction, unsigned long flags, | 2122 | enum dma_data_direction direction) |
2350 | void *context) | ||
2351 | { | 2123 | { |
2352 | unsigned int periods = buf_len / period_len; | 2124 | unsigned int periods = buf_len / period_len; |
2353 | struct dma_async_tx_descriptor *txd; | 2125 | struct dma_async_tx_descriptor *txd; |
@@ -2362,7 +2134,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
2362 | } | 2134 | } |
2363 | 2135 | ||
2364 | sg[periods].offset = 0; | 2136 | sg[periods].offset = 0; |
2365 | sg_dma_len(&sg[periods]) = 0; | 2137 | sg[periods].length = 0; |
2366 | sg[periods].page_link = | 2138 | sg[periods].page_link = |
2367 | ((unsigned long)sg | 0x01) & ~0x02; | 2139 | ((unsigned long)sg | 0x01) & ~0x02; |
2368 | 2140 | ||
@@ -2379,19 +2151,25 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2379 | struct dma_tx_state *txstate) | 2151 | struct dma_tx_state *txstate) |
2380 | { | 2152 | { |
2381 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2153 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2382 | enum dma_status ret; | 2154 | dma_cookie_t last_used; |
2155 | dma_cookie_t last_complete; | ||
2156 | int ret; | ||
2383 | 2157 | ||
2384 | if (d40c->phy_chan == NULL) { | 2158 | if (d40c->phy_chan == NULL) { |
2385 | chan_err(d40c, "Cannot read status of unallocated channel\n"); | 2159 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
2386 | return -EINVAL; | 2160 | return -EINVAL; |
2387 | } | 2161 | } |
2388 | 2162 | ||
2389 | ret = dma_cookie_status(chan, cookie, txstate); | 2163 | last_complete = d40c->completed; |
2390 | if (ret != DMA_SUCCESS) | 2164 | last_used = chan->cookie; |
2391 | dma_set_residue(txstate, stedma40_residue(chan)); | ||
2392 | 2165 | ||
2393 | if (d40_is_paused(d40c)) | 2166 | if (d40_is_paused(d40c)) |
2394 | ret = DMA_PAUSED; | 2167 | ret = DMA_PAUSED; |
2168 | else | ||
2169 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
2170 | |||
2171 | dma_set_tx_state(txstate, last_complete, last_used, | ||
2172 | stedma40_residue(chan)); | ||
2395 | 2173 | ||
2396 | return ret; | 2174 | return ret; |
2397 | } | 2175 | } |
@@ -2417,31 +2195,6 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2417 | spin_unlock_irqrestore(&d40c->lock, flags); | 2195 | spin_unlock_irqrestore(&d40c->lock, flags); |
2418 | } | 2196 | } |
2419 | 2197 | ||
2420 | static void d40_terminate_all(struct dma_chan *chan) | ||
2421 | { | ||
2422 | unsigned long flags; | ||
2423 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
2424 | int ret; | ||
2425 | |||
2426 | spin_lock_irqsave(&d40c->lock, flags); | ||
2427 | |||
2428 | pm_runtime_get_sync(d40c->base->dev); | ||
2429 | ret = d40_channel_execute_command(d40c, D40_DMA_STOP); | ||
2430 | if (ret) | ||
2431 | chan_err(d40c, "Failed to stop channel\n"); | ||
2432 | |||
2433 | d40_term_all(d40c); | ||
2434 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
2435 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
2436 | if (d40c->busy) { | ||
2437 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
2438 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
2439 | } | ||
2440 | d40c->busy = false; | ||
2441 | |||
2442 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2443 | } | ||
2444 | |||
2445 | static int | 2198 | static int |
2446 | dma40_config_to_halfchannel(struct d40_chan *d40c, | 2199 | dma40_config_to_halfchannel(struct d40_chan *d40c, |
2447 | struct stedma40_half_channel_info *info, | 2200 | struct stedma40_half_channel_info *info, |
@@ -2515,7 +2268,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2515 | dst_addr_width = config->dst_addr_width; | 2268 | dst_addr_width = config->dst_addr_width; |
2516 | dst_maxburst = config->dst_maxburst; | 2269 | dst_maxburst = config->dst_maxburst; |
2517 | 2270 | ||
2518 | if (config->direction == DMA_DEV_TO_MEM) { | 2271 | if (config->direction == DMA_FROM_DEVICE) { |
2519 | dma_addr_t dev_addr_rx = | 2272 | dma_addr_t dev_addr_rx = |
2520 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | 2273 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; |
2521 | 2274 | ||
@@ -2538,7 +2291,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2538 | if (dst_maxburst == 0) | 2291 | if (dst_maxburst == 0) |
2539 | dst_maxburst = src_maxburst; | 2292 | dst_maxburst = src_maxburst; |
2540 | 2293 | ||
2541 | } else if (config->direction == DMA_MEM_TO_DEV) { | 2294 | } else if (config->direction == DMA_TO_DEVICE) { |
2542 | dma_addr_t dev_addr_tx = | 2295 | dma_addr_t dev_addr_tx = |
2543 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | 2296 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; |
2544 | 2297 | ||
@@ -2603,7 +2356,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2603 | "configured channel %s for %s, data width %d/%d, " | 2356 | "configured channel %s for %s, data width %d/%d, " |
2604 | "maxburst %d/%d elements, LE, no flow control\n", | 2357 | "maxburst %d/%d elements, LE, no flow control\n", |
2605 | dma_chan_name(chan), | 2358 | dma_chan_name(chan), |
2606 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", | 2359 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", |
2607 | src_addr_width, dst_addr_width, | 2360 | src_addr_width, dst_addr_width, |
2608 | src_maxburst, dst_maxburst); | 2361 | src_maxburst, dst_maxburst); |
2609 | 2362 | ||
@@ -2622,8 +2375,7 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
2622 | 2375 | ||
2623 | switch (cmd) { | 2376 | switch (cmd) { |
2624 | case DMA_TERMINATE_ALL: | 2377 | case DMA_TERMINATE_ALL: |
2625 | d40_terminate_all(chan); | 2378 | return d40_terminate_all(d40c); |
2626 | return 0; | ||
2627 | case DMA_PAUSE: | 2379 | case DMA_PAUSE: |
2628 | return d40_pause(d40c); | 2380 | return d40_pause(d40c); |
2629 | case DMA_RESUME: | 2381 | case DMA_RESUME: |
@@ -2766,72 +2518,6 @@ failure1: | |||
2766 | return err; | 2518 | return err; |
2767 | } | 2519 | } |
2768 | 2520 | ||
2769 | /* Suspend resume functionality */ | ||
2770 | #ifdef CONFIG_PM | ||
2771 | static int dma40_pm_suspend(struct device *dev) | ||
2772 | { | ||
2773 | struct platform_device *pdev = to_platform_device(dev); | ||
2774 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2775 | int ret = 0; | ||
2776 | if (!pm_runtime_suspended(dev)) | ||
2777 | return -EBUSY; | ||
2778 | |||
2779 | if (base->lcpa_regulator) | ||
2780 | ret = regulator_disable(base->lcpa_regulator); | ||
2781 | return ret; | ||
2782 | } | ||
2783 | |||
2784 | static int dma40_runtime_suspend(struct device *dev) | ||
2785 | { | ||
2786 | struct platform_device *pdev = to_platform_device(dev); | ||
2787 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2788 | |||
2789 | d40_save_restore_registers(base, true); | ||
2790 | |||
2791 | /* Don't disable/enable clocks for v1 due to HW bugs */ | ||
2792 | if (base->rev != 1) | ||
2793 | writel_relaxed(base->gcc_pwr_off_mask, | ||
2794 | base->virtbase + D40_DREG_GCC); | ||
2795 | |||
2796 | return 0; | ||
2797 | } | ||
2798 | |||
2799 | static int dma40_runtime_resume(struct device *dev) | ||
2800 | { | ||
2801 | struct platform_device *pdev = to_platform_device(dev); | ||
2802 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2803 | |||
2804 | if (base->initialized) | ||
2805 | d40_save_restore_registers(base, false); | ||
2806 | |||
2807 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | ||
2808 | base->virtbase + D40_DREG_GCC); | ||
2809 | return 0; | ||
2810 | } | ||
2811 | |||
2812 | static int dma40_resume(struct device *dev) | ||
2813 | { | ||
2814 | struct platform_device *pdev = to_platform_device(dev); | ||
2815 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2816 | int ret = 0; | ||
2817 | |||
2818 | if (base->lcpa_regulator) | ||
2819 | ret = regulator_enable(base->lcpa_regulator); | ||
2820 | |||
2821 | return ret; | ||
2822 | } | ||
2823 | |||
2824 | static const struct dev_pm_ops dma40_pm_ops = { | ||
2825 | .suspend = dma40_pm_suspend, | ||
2826 | .runtime_suspend = dma40_runtime_suspend, | ||
2827 | .runtime_resume = dma40_runtime_resume, | ||
2828 | .resume = dma40_resume, | ||
2829 | }; | ||
2830 | #define DMA40_PM_OPS (&dma40_pm_ops) | ||
2831 | #else | ||
2832 | #define DMA40_PM_OPS NULL | ||
2833 | #endif | ||
2834 | |||
2835 | /* Initialization functions. */ | 2521 | /* Initialization functions. */ |
2836 | 2522 | ||
2837 | static int __init d40_phy_res_init(struct d40_base *base) | 2523 | static int __init d40_phy_res_init(struct d40_base *base) |
@@ -2840,7 +2526,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2840 | int num_phy_chans_avail = 0; | 2526 | int num_phy_chans_avail = 0; |
2841 | u32 val[2]; | 2527 | u32 val[2]; |
2842 | int odd_even_bit = -2; | 2528 | int odd_even_bit = -2; |
2843 | int gcc = D40_DREG_GCC_ENA; | ||
2844 | 2529 | ||
2845 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | 2530 | val[0] = readl(base->virtbase + D40_DREG_PRSME); |
2846 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | 2531 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); |
@@ -2852,17 +2537,9 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2852 | /* Mark security only channels as occupied */ | 2537 | /* Mark security only channels as occupied */ |
2853 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 2538 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; |
2854 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 2539 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; |
2855 | base->phy_res[i].reserved = true; | ||
2856 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | ||
2857 | D40_DREG_GCC_SRC); | ||
2858 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | ||
2859 | D40_DREG_GCC_DST); | ||
2860 | |||
2861 | |||
2862 | } else { | 2540 | } else { |
2863 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | 2541 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; |
2864 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | 2542 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; |
2865 | base->phy_res[i].reserved = false; | ||
2866 | num_phy_chans_avail++; | 2543 | num_phy_chans_avail++; |
2867 | } | 2544 | } |
2868 | spin_lock_init(&base->phy_res[i].lock); | 2545 | spin_lock_init(&base->phy_res[i].lock); |
@@ -2874,11 +2551,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2874 | 2551 | ||
2875 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; | 2552 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
2876 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | 2553 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; |
2877 | base->phy_res[chan].reserved = true; | ||
2878 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | ||
2879 | D40_DREG_GCC_SRC); | ||
2880 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | ||
2881 | D40_DREG_GCC_DST); | ||
2882 | num_phy_chans_avail--; | 2554 | num_phy_chans_avail--; |
2883 | } | 2555 | } |
2884 | 2556 | ||
@@ -2899,15 +2571,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2899 | val[0] = val[0] >> 2; | 2571 | val[0] = val[0] >> 2; |
2900 | } | 2572 | } |
2901 | 2573 | ||
2902 | /* | ||
2903 | * To keep things simple, Enable all clocks initially. | ||
2904 | * The clocks will get managed later post channel allocation. | ||
2905 | * The clocks for the event lines on which reserved channels exists | ||
2906 | * are not managed here. | ||
2907 | */ | ||
2908 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | ||
2909 | base->gcc_pwr_off_mask = gcc; | ||
2910 | |||
2911 | return num_phy_chans_avail; | 2574 | return num_phy_chans_avail; |
2912 | } | 2575 | } |
2913 | 2576 | ||
@@ -2920,23 +2583,19 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2920 | struct d40_base *base = NULL; | 2583 | struct d40_base *base = NULL; |
2921 | int num_log_chans = 0; | 2584 | int num_log_chans = 0; |
2922 | int num_phy_chans; | 2585 | int num_phy_chans; |
2923 | int clk_ret = -EINVAL; | ||
2924 | int i; | 2586 | int i; |
2925 | u32 pid; | 2587 | u32 pid; |
2926 | u32 cid; | 2588 | u32 cid; |
2927 | u8 rev; | 2589 | u8 rev; |
2928 | 2590 | ||
2929 | clk = clk_get(&pdev->dev, NULL); | 2591 | clk = clk_get(&pdev->dev, NULL); |
2592 | |||
2930 | if (IS_ERR(clk)) { | 2593 | if (IS_ERR(clk)) { |
2931 | d40_err(&pdev->dev, "No matching clock found\n"); | 2594 | d40_err(&pdev->dev, "No matching clock found\n"); |
2932 | goto failure; | 2595 | goto failure; |
2933 | } | 2596 | } |
2934 | 2597 | ||
2935 | clk_ret = clk_prepare_enable(clk); | 2598 | clk_enable(clk); |
2936 | if (clk_ret) { | ||
2937 | d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); | ||
2938 | goto failure; | ||
2939 | } | ||
2940 | 2599 | ||
2941 | /* Get IO for DMAC base address */ | 2600 | /* Get IO for DMAC base address */ |
2942 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 2601 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); |
@@ -2984,12 +2643,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2984 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2643 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
2985 | rev, res->start); | 2644 | rev, res->start); |
2986 | 2645 | ||
2987 | if (rev < 2) { | ||
2988 | d40_err(&pdev->dev, "hardware revision: %d is not supported", | ||
2989 | rev); | ||
2990 | goto failure; | ||
2991 | } | ||
2992 | |||
2993 | plat_data = pdev->dev.platform_data; | 2646 | plat_data = pdev->dev.platform_data; |
2994 | 2647 | ||
2995 | /* Count the number of logical channels in use */ | 2648 | /* Count the number of logical channels in use */ |
@@ -3045,15 +2698,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3045 | goto failure; | 2698 | goto failure; |
3046 | } | 2699 | } |
3047 | 2700 | ||
3048 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * | 2701 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * |
3049 | sizeof(d40_backup_regs_chan), | 2702 | sizeof(struct d40_desc *) * |
2703 | D40_LCLA_LINK_PER_EVENT_GRP, | ||
3050 | GFP_KERNEL); | 2704 | GFP_KERNEL); |
3051 | if (!base->reg_val_backup_chan) | ||
3052 | goto failure; | ||
3053 | |||
3054 | base->lcla_pool.alloc_map = | ||
3055 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) | ||
3056 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | ||
3057 | if (!base->lcla_pool.alloc_map) | 2705 | if (!base->lcla_pool.alloc_map) |
3058 | goto failure; | 2706 | goto failure; |
3059 | 2707 | ||
@@ -3066,10 +2714,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3066 | return base; | 2714 | return base; |
3067 | 2715 | ||
3068 | failure: | 2716 | failure: |
3069 | if (!clk_ret) | 2717 | if (!IS_ERR(clk)) { |
3070 | clk_disable_unprepare(clk); | 2718 | clk_disable(clk); |
3071 | if (!IS_ERR(clk)) | ||
3072 | clk_put(clk); | 2719 | clk_put(clk); |
2720 | } | ||
3073 | if (virtbase) | 2721 | if (virtbase) |
3074 | iounmap(virtbase); | 2722 | iounmap(virtbase); |
3075 | if (res) | 2723 | if (res) |
@@ -3080,7 +2728,6 @@ failure: | |||
3080 | 2728 | ||
3081 | if (base) { | 2729 | if (base) { |
3082 | kfree(base->lcla_pool.alloc_map); | 2730 | kfree(base->lcla_pool.alloc_map); |
3083 | kfree(base->reg_val_backup_chan); | ||
3084 | kfree(base->lookup_log_chans); | 2731 | kfree(base->lookup_log_chans); |
3085 | kfree(base->lookup_phy_chans); | 2732 | kfree(base->lookup_phy_chans); |
3086 | kfree(base->phy_res); | 2733 | kfree(base->phy_res); |
@@ -3093,9 +2740,9 @@ failure: | |||
3093 | static void __init d40_hw_init(struct d40_base *base) | 2740 | static void __init d40_hw_init(struct d40_base *base) |
3094 | { | 2741 | { |
3095 | 2742 | ||
3096 | static struct d40_reg_val dma_init_reg[] = { | 2743 | static const struct d40_reg_val dma_init_reg[] = { |
3097 | /* Clock every part of the DMA block from start */ | 2744 | /* Clock every part of the DMA block from start */ |
3098 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | 2745 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, |
3099 | 2746 | ||
3100 | /* Interrupts on all logical channels */ | 2747 | /* Interrupts on all logical channels */ |
3101 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 2748 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, |
@@ -3295,31 +2942,11 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3295 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); | 2942 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
3296 | goto failure; | 2943 | goto failure; |
3297 | } | 2944 | } |
3298 | /* If lcla has to be located in ESRAM we don't need to allocate */ | ||
3299 | if (base->plat_data->use_esram_lcla) { | ||
3300 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
3301 | "lcla_esram"); | ||
3302 | if (!res) { | ||
3303 | ret = -ENOENT; | ||
3304 | d40_err(&pdev->dev, | ||
3305 | "No \"lcla_esram\" memory resource\n"); | ||
3306 | goto failure; | ||
3307 | } | ||
3308 | base->lcla_pool.base = ioremap(res->start, | ||
3309 | resource_size(res)); | ||
3310 | if (!base->lcla_pool.base) { | ||
3311 | ret = -ENOMEM; | ||
3312 | d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); | ||
3313 | goto failure; | ||
3314 | } | ||
3315 | writel(res->start, base->virtbase + D40_DREG_LCLA); | ||
3316 | 2945 | ||
3317 | } else { | 2946 | ret = d40_lcla_allocate(base); |
3318 | ret = d40_lcla_allocate(base); | 2947 | if (ret) { |
3319 | if (ret) { | 2948 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
3320 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); | 2949 | goto failure; |
3321 | goto failure; | ||
3322 | } | ||
3323 | } | 2950 | } |
3324 | 2951 | ||
3325 | spin_lock_init(&base->lcla_pool.lock); | 2952 | spin_lock_init(&base->lcla_pool.lock); |
@@ -3332,32 +2959,6 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3332 | goto failure; | 2959 | goto failure; |
3333 | } | 2960 | } |
3334 | 2961 | ||
3335 | pm_runtime_irq_safe(base->dev); | ||
3336 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | ||
3337 | pm_runtime_use_autosuspend(base->dev); | ||
3338 | pm_runtime_enable(base->dev); | ||
3339 | pm_runtime_resume(base->dev); | ||
3340 | |||
3341 | if (base->plat_data->use_esram_lcla) { | ||
3342 | |||
3343 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | ||
3344 | if (IS_ERR(base->lcpa_regulator)) { | ||
3345 | d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); | ||
3346 | base->lcpa_regulator = NULL; | ||
3347 | goto failure; | ||
3348 | } | ||
3349 | |||
3350 | ret = regulator_enable(base->lcpa_regulator); | ||
3351 | if (ret) { | ||
3352 | d40_err(&pdev->dev, | ||
3353 | "Failed to enable lcpa_regulator\n"); | ||
3354 | regulator_put(base->lcpa_regulator); | ||
3355 | base->lcpa_regulator = NULL; | ||
3356 | goto failure; | ||
3357 | } | ||
3358 | } | ||
3359 | |||
3360 | base->initialized = true; | ||
3361 | err = d40_dmaengine_init(base, num_reserved_chans); | 2962 | err = d40_dmaengine_init(base, num_reserved_chans); |
3362 | if (err) | 2963 | if (err) |
3363 | goto failure; | 2964 | goto failure; |
@@ -3374,11 +2975,6 @@ failure: | |||
3374 | if (base->virtbase) | 2975 | if (base->virtbase) |
3375 | iounmap(base->virtbase); | 2976 | iounmap(base->virtbase); |
3376 | 2977 | ||
3377 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { | ||
3378 | iounmap(base->lcla_pool.base); | ||
3379 | base->lcla_pool.base = NULL; | ||
3380 | } | ||
3381 | |||
3382 | if (base->lcla_pool.dma_addr) | 2978 | if (base->lcla_pool.dma_addr) |
3383 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | 2979 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, |
3384 | SZ_1K * base->num_phy_chans, | 2980 | SZ_1K * base->num_phy_chans, |
@@ -3401,11 +2997,6 @@ failure: | |||
3401 | clk_put(base->clk); | 2997 | clk_put(base->clk); |
3402 | } | 2998 | } |
3403 | 2999 | ||
3404 | if (base->lcpa_regulator) { | ||
3405 | regulator_disable(base->lcpa_regulator); | ||
3406 | regulator_put(base->lcpa_regulator); | ||
3407 | } | ||
3408 | |||
3409 | kfree(base->lcla_pool.alloc_map); | 3000 | kfree(base->lcla_pool.alloc_map); |
3410 | kfree(base->lookup_log_chans); | 3001 | kfree(base->lookup_log_chans); |
3411 | kfree(base->lookup_phy_chans); | 3002 | kfree(base->lookup_phy_chans); |
@@ -3421,7 +3012,6 @@ static struct platform_driver d40_driver = { | |||
3421 | .driver = { | 3012 | .driver = { |
3422 | .owner = THIS_MODULE, | 3013 | .owner = THIS_MODULE, |
3423 | .name = D40_NAME, | 3014 | .name = D40_NAME, |
3424 | .pm = DMA40_PM_OPS, | ||
3425 | }, | 3015 | }, |
3426 | }; | 3016 | }; |
3427 | 3017 | ||
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 851ad56e840..cad9e1daedf 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -6,7 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/platform_data/dma-ste-dma40.h> | 9 | #include <plat/ste_dma40.h> |
10 | 10 | ||
11 | #include "ste_dma40_ll.h" | 11 | #include "ste_dma40_ll.h" |
12 | 12 | ||
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 6d47373f3f5..b44c455158d 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -16,8 +16,6 @@ | |||
16 | 16 | ||
17 | #define D40_TYPE_TO_GROUP(type) (type / 16) | 17 | #define D40_TYPE_TO_GROUP(type) (type / 16) |
18 | #define D40_TYPE_TO_EVENT(type) (type % 16) | 18 | #define D40_TYPE_TO_EVENT(type) (type % 16) |
19 | #define D40_GROUP_SIZE 8 | ||
20 | #define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2) | ||
21 | 19 | ||
22 | /* Most bits of the CFG register are the same in log as in phy mode */ | 20 | /* Most bits of the CFG register are the same in log as in phy mode */ |
23 | #define D40_SREG_CFG_MST_POS 15 | 21 | #define D40_SREG_CFG_MST_POS 15 |
@@ -62,6 +60,8 @@ | |||
62 | #define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) | 60 | #define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) |
63 | 61 | ||
64 | /* Link register */ | 62 | /* Link register */ |
63 | #define D40_DEACTIVATE_EVENTLINE 0x0 | ||
64 | #define D40_ACTIVATE_EVENTLINE 0x1 | ||
65 | #define D40_EVENTLINE_POS(i) (2 * i) | 65 | #define D40_EVENTLINE_POS(i) (2 * i) |
66 | #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) | 66 | #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) |
67 | 67 | ||
@@ -123,15 +123,6 @@ | |||
123 | 123 | ||
124 | /* DMA Register Offsets */ | 124 | /* DMA Register Offsets */ |
125 | #define D40_DREG_GCC 0x000 | 125 | #define D40_DREG_GCC 0x000 |
126 | #define D40_DREG_GCC_ENA 0x1 | ||
127 | /* This assumes that there are only 4 event groups */ | ||
128 | #define D40_DREG_GCC_ENABLE_ALL 0xff01 | ||
129 | #define D40_DREG_GCC_EVTGRP_POS 8 | ||
130 | #define D40_DREG_GCC_SRC 0 | ||
131 | #define D40_DREG_GCC_DST 1 | ||
132 | #define D40_DREG_GCC_EVTGRP_ENA(x, y) \ | ||
133 | (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y)) | ||
134 | |||
135 | #define D40_DREG_PRTYP 0x004 | 126 | #define D40_DREG_PRTYP 0x004 |
136 | #define D40_DREG_PRSME 0x008 | 127 | #define D40_DREG_PRSME 0x008 |
137 | #define D40_DREG_PRSMO 0x00C | 128 | #define D40_DREG_PRSMO 0x00C |
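The hunk above also drops the D40_DREG_GCC_* helpers that the removed d40_phy_res_init() code used to build base->gcc_pwr_off_mask. As a worked example of the removed macro's arithmetic, reproduced here with illustrative names: D40_DREG_GCC_EVTGRP_ENA(x, y) evaluates to 1 << (8 + 2*x + y), so event group 2 on the destination side (y = D40_DREG_GCC_DST = 1) maps to bit 13:

/* Re-statement of the removed macro, for illustration only. */
#define EX_GCC_EVTGRP_POS	8
#define EX_GCC_SRC		0
#define EX_GCC_DST		1
#define EX_GCC_EVTGRP_ENA(x, y)	(1 << (EX_GCC_EVTGRP_POS + 2 * (x) + (y)))

/* EX_GCC_EVTGRP_ENA(2, EX_GCC_DST) == 1 << (8 + 4 + 1) == 1 << 13 == 0x2000 */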
@@ -202,7 +193,7 @@ | |||
202 | /* LLI related structures */ | 193 | /* LLI related structures */ |
203 | 194 | ||
204 | /** | 195 | /** |
205 | * struct d40_phy_lli - The basic configuration register for each physical | 196 | * struct d40_phy_lli - The basic configration register for each physical |
206 | * channel. | 197 | * channel. |
207 | * | 198 | * |
208 | * @reg_cfg: The configuration register. | 199 | * @reg_cfg: The configuration register. |
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c deleted file mode 100644 index c39e61bc817..00000000000 --- a/drivers/dma/tegra20-apb-dma.c +++ /dev/null | |||
@@ -1,1429 +0,0 @@ | |||
1 | /* | ||
2 | * DMA driver for Nvidia's Tegra20 APB DMA controller. | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/bitops.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/dmaengine.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_device.h> | ||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/pm_runtime.h> | ||
33 | #include <linux/slab.h> | ||
34 | |||
35 | #include <mach/clk.h> | ||
36 | #include "dmaengine.h" | ||
37 | |||
38 | #define TEGRA_APBDMA_GENERAL 0x0 | ||
39 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) | ||
40 | |||
41 | #define TEGRA_APBDMA_CONTROL 0x010 | ||
42 | #define TEGRA_APBDMA_IRQ_MASK 0x01c | ||
43 | #define TEGRA_APBDMA_IRQ_MASK_SET 0x020 | ||
44 | |||
45 | /* CSR register */ | ||
46 | #define TEGRA_APBDMA_CHAN_CSR 0x00 | ||
47 | #define TEGRA_APBDMA_CSR_ENB BIT(31) | ||
48 | #define TEGRA_APBDMA_CSR_IE_EOC BIT(30) | ||
49 | #define TEGRA_APBDMA_CSR_HOLD BIT(29) | ||
50 | #define TEGRA_APBDMA_CSR_DIR BIT(28) | ||
51 | #define TEGRA_APBDMA_CSR_ONCE BIT(27) | ||
52 | #define TEGRA_APBDMA_CSR_FLOW BIT(21) | ||
53 | #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 | ||
54 | #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC | ||
55 | |||
56 | /* STATUS register */ | ||
57 | #define TEGRA_APBDMA_CHAN_STATUS 0x004 | ||
58 | #define TEGRA_APBDMA_STATUS_BUSY BIT(31) | ||
59 | #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30) | ||
60 | #define TEGRA_APBDMA_STATUS_HALT BIT(29) | ||
61 | #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28) | ||
62 | #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 | ||
63 | #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC | ||
64 | |||
65 | /* AHB memory address */ | ||
66 | #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 | ||
67 | |||
68 | /* AHB sequence register */ | ||
69 | #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14 | ||
70 | #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31) | ||
71 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28) | ||
72 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28) | ||
73 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28) | ||
74 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28) | ||
75 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28) | ||
76 | #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27) | ||
77 | #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24) | ||
78 | #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24) | ||
79 | #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24) | ||
80 | #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19) | ||
81 | #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16 | ||
82 | #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0 | ||
83 | |||
84 | /* APB address */ | ||
85 | #define TEGRA_APBDMA_CHAN_APBPTR 0x018 | ||
86 | |||
87 | /* APB sequence register */ | ||
88 | #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c | ||
89 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28) | ||
90 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28) | ||
91 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28) | ||
92 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28) | ||
93 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28) | ||
94 | #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) | ||
95 | #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) | ||
96 | |||
97 | /* | ||
98 | * If any burst is in flight when the DMA is paused, this is the time needed | ||
99 | * for the in-flight burst to complete and the DMA status register to update. | ||
100 | */ | ||
101 | #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20 | ||
102 | |||
103 | /* Channel base address offset from APBDMA base address */ | ||
104 | #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 | ||
105 | |||
106 | /* DMA channel register space size */ | ||
107 | #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 | ||
108 | |||
109 | struct tegra_dma; | ||
110 | |||
111 | /* | ||
112 | * tegra_dma_chip_data Tegra chip specific DMA data | ||
113 | * @nr_channels: Number of channels available in the controller. | ||
114 | * @max_dma_count: Maximum DMA transfer count supported by DMA controller. | ||
115 | */ | ||
116 | struct tegra_dma_chip_data { | ||
117 | int nr_channels; | ||
118 | int max_dma_count; | ||
119 | }; | ||
120 | |||
121 | /* DMA channel registers */ | ||
122 | struct tegra_dma_channel_regs { | ||
123 | unsigned long csr; | ||
124 | unsigned long ahb_ptr; | ||
125 | unsigned long apb_ptr; | ||
126 | unsigned long ahb_seq; | ||
127 | unsigned long apb_seq; | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * tegra_dma_sg_req: Dma request details to configure hardware. This | ||
132 | * contains the details for one transfer to configure DMA hw. | ||
133 | * A client's data transfer request can be broken into multiple | ||
134 | * sub-transfers according to the requester details and hardware support. | ||
135 | * Each sub-transfer is added to the list of transfers and points to the Tegra | ||
136 | * DMA descriptor which manages the transfer details. | ||
137 | */ | ||
138 | struct tegra_dma_sg_req { | ||
139 | struct tegra_dma_channel_regs ch_regs; | ||
140 | int req_len; | ||
141 | bool configured; | ||
142 | bool last_sg; | ||
143 | bool half_done; | ||
144 | struct list_head node; | ||
145 | struct tegra_dma_desc *dma_desc; | ||
146 | }; | ||
147 | |||
148 | /* | ||
149 | * tegra_dma_desc: Tegra DMA descriptor which manages the client requests. | ||
150 | * This descriptor keeps track of transfer status, callbacks, request | ||
151 | * counts etc. | ||
152 | */ | ||
153 | struct tegra_dma_desc { | ||
154 | struct dma_async_tx_descriptor txd; | ||
155 | int bytes_requested; | ||
156 | int bytes_transferred; | ||
157 | enum dma_status dma_status; | ||
158 | struct list_head node; | ||
159 | struct list_head tx_list; | ||
160 | struct list_head cb_node; | ||
161 | int cb_count; | ||
162 | }; | ||
163 | |||
164 | struct tegra_dma_channel; | ||
165 | |||
166 | typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | ||
167 | bool to_terminate); | ||
168 | |||
169 | /* tegra_dma_channel: Channel specific information */ | ||
170 | struct tegra_dma_channel { | ||
171 | struct dma_chan dma_chan; | ||
172 | char name[30]; | ||
173 | bool config_init; | ||
174 | int id; | ||
175 | int irq; | ||
176 | unsigned long chan_base_offset; | ||
177 | spinlock_t lock; | ||
178 | bool busy; | ||
179 | struct tegra_dma *tdma; | ||
180 | bool cyclic; | ||
181 | |||
182 | /* Different lists for managing the requests */ | ||
183 | struct list_head free_sg_req; | ||
184 | struct list_head pending_sg_req; | ||
185 | struct list_head free_dma_desc; | ||
186 | struct list_head cb_desc; | ||
187 | |||
188 | /* ISR handler and tasklet for bottom half of isr handling */ | ||
189 | dma_isr_handler isr_handler; | ||
190 | struct tasklet_struct tasklet; | ||
191 | dma_async_tx_callback callback; | ||
192 | void *callback_param; | ||
193 | |||
194 | /* Channel-slave specific configuration */ | ||
195 | struct dma_slave_config dma_sconfig; | ||
196 | }; | ||
197 | |||
198 | /* tegra_dma: Tegra DMA specific information */ | ||
199 | struct tegra_dma { | ||
200 | struct dma_device dma_dev; | ||
201 | struct device *dev; | ||
202 | struct clk *dma_clk; | ||
203 | spinlock_t global_lock; | ||
204 | void __iomem *base_addr; | ||
205 | const struct tegra_dma_chip_data *chip_data; | ||
206 | |||
207 | /* Some registers need to be cached before suspend */ | ||
208 | u32 reg_gen; | ||
209 | |||
210 | /* Last member of the structure */ | ||
211 | struct tegra_dma_channel channels[0]; | ||
212 | }; | ||
213 | |||
214 | static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) | ||
215 | { | ||
216 | writel(val, tdma->base_addr + reg); | ||
217 | } | ||
218 | |||
219 | static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) | ||
220 | { | ||
221 | return readl(tdma->base_addr + reg); | ||
222 | } | ||
223 | |||
224 | static inline void tdc_write(struct tegra_dma_channel *tdc, | ||
225 | u32 reg, u32 val) | ||
226 | { | ||
227 | writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); | ||
228 | } | ||
229 | |||
230 | static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) | ||
231 | { | ||
232 | return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); | ||
233 | } | ||
234 | |||
235 | static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) | ||
236 | { | ||
237 | return container_of(dc, struct tegra_dma_channel, dma_chan); | ||
238 | } | ||
239 | |||
240 | static inline struct tegra_dma_desc *txd_to_tegra_dma_desc( | ||
241 | struct dma_async_tx_descriptor *td) | ||
242 | { | ||
243 | return container_of(td, struct tegra_dma_desc, txd); | ||
244 | } | ||
245 | |||
246 | static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) | ||
247 | { | ||
248 | return &tdc->dma_chan.dev->device; | ||
249 | } | ||
250 | |||
251 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); | ||
252 | static int tegra_dma_runtime_suspend(struct device *dev); | ||
253 | static int tegra_dma_runtime_resume(struct device *dev); | ||
254 | |||
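The tdc_read()/tdc_write() helpers below compute each channel's register window from the defines near the top of the file: channel registers start TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET (0x1000) past the controller base, and each channel occupies TEGRA_APBDMA_CHANNEL_REGISTER_SIZE (0x20) bytes. A hedged sketch of the addressing, with a hypothetical helper name:

/* Illustrative only: effective MMIO address of register 'reg' of channel 'id'.
 * Channel 0's CSR lives at base + 0x1000, channel 1's at base + 0x1020, ... */
static void __iomem *example_chan_reg(struct tegra_dma *tdma, int id, u32 reg)
{
	unsigned long off = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
			    id * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

	return tdma->base_addr + off + reg;
}

/* e.g. example_chan_reg(tdma, 2, TEGRA_APBDMA_CHAN_STATUS)
 *      == base_addr + 0x1000 + 2 * 0x20 + 0x004 == base_addr + 0x1044 */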
255 | /* Get DMA desc from free list, if not there then allocate it. */ | ||
256 | static struct tegra_dma_desc *tegra_dma_desc_get( | ||
257 | struct tegra_dma_channel *tdc) | ||
258 | { | ||
259 | struct tegra_dma_desc *dma_desc; | ||
260 | unsigned long flags; | ||
261 | |||
262 | spin_lock_irqsave(&tdc->lock, flags); | ||
263 | |||
264 | /* Do not allocate if desc are waiting for ack */ | ||
265 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | ||
266 | if (async_tx_test_ack(&dma_desc->txd)) { | ||
267 | list_del(&dma_desc->node); | ||
268 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
269 | return dma_desc; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
274 | |||
275 | /* Allocate DMA desc */ | ||
276 | dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); | ||
277 | if (!dma_desc) { | ||
278 | dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); | ||
279 | return NULL; | ||
280 | } | ||
281 | |||
282 | dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); | ||
283 | dma_desc->txd.tx_submit = tegra_dma_tx_submit; | ||
284 | dma_desc->txd.flags = 0; | ||
285 | return dma_desc; | ||
286 | } | ||
287 | |||
288 | static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, | ||
289 | struct tegra_dma_desc *dma_desc) | ||
290 | { | ||
291 | unsigned long flags; | ||
292 | |||
293 | spin_lock_irqsave(&tdc->lock, flags); | ||
294 | if (!list_empty(&dma_desc->tx_list)) | ||
295 | list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req); | ||
296 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
297 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
298 | } | ||
299 | |||
300 | static struct tegra_dma_sg_req *tegra_dma_sg_req_get( | ||
301 | struct tegra_dma_channel *tdc) | ||
302 | { | ||
303 | struct tegra_dma_sg_req *sg_req = NULL; | ||
304 | unsigned long flags; | ||
305 | |||
306 | spin_lock_irqsave(&tdc->lock, flags); | ||
307 | if (!list_empty(&tdc->free_sg_req)) { | ||
308 | sg_req = list_first_entry(&tdc->free_sg_req, | ||
309 | typeof(*sg_req), node); | ||
310 | list_del(&sg_req->node); | ||
311 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
312 | return sg_req; | ||
313 | } | ||
314 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
315 | |||
316 | sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); | ||
317 | if (!sg_req) | ||
318 | dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); | ||
319 | return sg_req; | ||
320 | } | ||
321 | |||
322 | static int tegra_dma_slave_config(struct dma_chan *dc, | ||
323 | struct dma_slave_config *sconfig) | ||
324 | { | ||
325 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
326 | |||
327 | if (!list_empty(&tdc->pending_sg_req)) { | ||
328 | dev_err(tdc2dev(tdc), "Configuration not allowed\n"); | ||
329 | return -EBUSY; | ||
330 | } | ||
331 | |||
332 | memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
333 | tdc->config_init = true; | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, | ||
338 | bool wait_for_burst_complete) | ||
339 | { | ||
340 | struct tegra_dma *tdma = tdc->tdma; | ||
341 | |||
342 | spin_lock(&tdma->global_lock); | ||
343 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); | ||
344 | if (wait_for_burst_complete) | ||
345 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
346 | } | ||
347 | |||
348 | static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) | ||
349 | { | ||
350 | struct tegra_dma *tdma = tdc->tdma; | ||
351 | |||
352 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); | ||
353 | spin_unlock(&tdma->global_lock); | ||
354 | } | ||
355 | |||
356 | static void tegra_dma_stop(struct tegra_dma_channel *tdc) | ||
357 | { | ||
358 | u32 csr; | ||
359 | u32 status; | ||
360 | |||
361 | /* Disable interrupts */ | ||
362 | csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); | ||
363 | csr &= ~TEGRA_APBDMA_CSR_IE_EOC; | ||
364 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); | ||
365 | |||
366 | /* Disable DMA */ | ||
367 | csr &= ~TEGRA_APBDMA_CSR_ENB; | ||
368 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); | ||
369 | |||
370 | /* Clear interrupt status if it is there */ | ||
371 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
372 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
373 | dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); | ||
374 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | ||
375 | } | ||
376 | tdc->busy = false; | ||
377 | } | ||
378 | |||
379 | static void tegra_dma_start(struct tegra_dma_channel *tdc, | ||
380 | struct tegra_dma_sg_req *sg_req) | ||
381 | { | ||
382 | struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; | ||
383 | |||
384 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr); | ||
385 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq); | ||
386 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); | ||
387 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); | ||
388 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); | ||
389 | |||
390 | /* Start DMA */ | ||
391 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
392 | ch_regs->csr | TEGRA_APBDMA_CSR_ENB); | ||
393 | } | ||
394 | |||
395 | static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, | ||
396 | struct tegra_dma_sg_req *nsg_req) | ||
397 | { | ||
398 | unsigned long status; | ||
399 | |||
400 | /* | ||
401 | * The DMA controller reloads the new configuration for the next transfer | ||
402 | * after the last burst of the current transfer completes. | ||
403 | * If the IEC status is not set, this check ensures that the last burst | ||
404 | * has not yet completed. The last burst may still be in | ||
405 | * flight and may complete, but because the DMA is paused it | ||
406 | * will neither generate an interrupt nor reload the new | ||
407 | * configuration. | ||
408 | * If the IEC status is already set, the interrupt handler needs to | ||
409 | * load the new configuration. | ||
410 | */ | ||
411 | tegra_dma_global_pause(tdc, false); | ||
412 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
413 | |||
414 | /* | ||
415 | * If an interrupt is pending then do nothing, as the ISR will handle | ||
416 | * the programming of the new request. | ||
417 | */ | ||
418 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
419 | dev_err(tdc2dev(tdc), | ||
420 | "Skipping new configuration as interrupt is pending\n"); | ||
421 | tegra_dma_global_resume(tdc); | ||
422 | return; | ||
423 | } | ||
424 | |||
425 | /* Safe to program new configuration */ | ||
426 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); | ||
427 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); | ||
428 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
429 | nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); | ||
430 | nsg_req->configured = true; | ||
431 | |||
432 | tegra_dma_global_resume(tdc); | ||
433 | } | ||
434 | |||
435 | static void tdc_start_head_req(struct tegra_dma_channel *tdc) | ||
436 | { | ||
437 | struct tegra_dma_sg_req *sg_req; | ||
438 | |||
439 | if (list_empty(&tdc->pending_sg_req)) | ||
440 | return; | ||
441 | |||
442 | sg_req = list_first_entry(&tdc->pending_sg_req, | ||
443 | typeof(*sg_req), node); | ||
444 | tegra_dma_start(tdc, sg_req); | ||
445 | sg_req->configured = true; | ||
446 | tdc->busy = true; | ||
447 | } | ||
448 | |||
449 | static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) | ||
450 | { | ||
451 | struct tegra_dma_sg_req *hsgreq; | ||
452 | struct tegra_dma_sg_req *hnsgreq; | ||
453 | |||
454 | if (list_empty(&tdc->pending_sg_req)) | ||
455 | return; | ||
456 | |||
457 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | ||
458 | if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) { | ||
459 | hnsgreq = list_first_entry(&hsgreq->node, | ||
460 | typeof(*hnsgreq), node); | ||
461 | tegra_dma_configure_for_next(tdc, hnsgreq); | ||
462 | } | ||
463 | } | ||
464 | |||
465 | static inline int get_current_xferred_count(struct tegra_dma_channel *tdc, | ||
466 | struct tegra_dma_sg_req *sg_req, unsigned long status) | ||
467 | { | ||
468 | return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; | ||
469 | } | ||
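A short worked example of the arithmetic in get_current_xferred_count() above, assuming the STATUS count field holds the remaining count in the same (length - 4) encoding that the CSR word count uses, which is how the helper treats it (numbers are illustrative only):

/* req_len = 64 bytes; STATUS count field reads back 0x20 (32).     */
/* transferred = req_len - (status & 0xFFFC) - 4 = 64 - 32 - 4 = 28 */
static unsigned int example_xferred(unsigned int req_len, unsigned long status)
{
	return req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}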
470 | |||
471 | static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) | ||
472 | { | ||
473 | struct tegra_dma_sg_req *sgreq; | ||
474 | struct tegra_dma_desc *dma_desc; | ||
475 | |||
476 | while (!list_empty(&tdc->pending_sg_req)) { | ||
477 | sgreq = list_first_entry(&tdc->pending_sg_req, | ||
478 | typeof(*sgreq), node); | ||
479 | list_move_tail(&sgreq->node, &tdc->free_sg_req); | ||
480 | if (sgreq->last_sg) { | ||
481 | dma_desc = sgreq->dma_desc; | ||
482 | dma_desc->dma_status = DMA_ERROR; | ||
483 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
484 | |||
485 | /* Add in cb list if it is not there. */ | ||
486 | if (!dma_desc->cb_count) | ||
487 | list_add_tail(&dma_desc->cb_node, | ||
488 | &tdc->cb_desc); | ||
489 | dma_desc->cb_count++; | ||
490 | } | ||
491 | } | ||
492 | tdc->isr_handler = NULL; | ||
493 | } | ||
494 | |||
495 | static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | ||
496 | struct tegra_dma_sg_req *last_sg_req, bool to_terminate) | ||
497 | { | ||
498 | struct tegra_dma_sg_req *hsgreq = NULL; | ||
499 | |||
500 | if (list_empty(&tdc->pending_sg_req)) { | ||
501 | dev_err(tdc2dev(tdc), "Dma is running without req\n"); | ||
502 | tegra_dma_stop(tdc); | ||
503 | return false; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * Check that the head request on the list is in flight. | ||
508 | * If it is not in flight then abort the transfer, as the | ||
509 | * transfer loop cannot continue. | ||
510 | */ | ||
511 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | ||
512 | if (!hsgreq->configured) { | ||
513 | tegra_dma_stop(tdc); | ||
514 | dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); | ||
515 | tegra_dma_abort_all(tdc); | ||
516 | return false; | ||
517 | } | ||
518 | |||
519 | /* Configure next request */ | ||
520 | if (!to_terminate) | ||
521 | tdc_configure_next_head_desc(tdc); | ||
522 | return true; | ||
523 | } | ||
524 | |||
525 | static void handle_once_dma_done(struct tegra_dma_channel *tdc, | ||
526 | bool to_terminate) | ||
527 | { | ||
528 | struct tegra_dma_sg_req *sgreq; | ||
529 | struct tegra_dma_desc *dma_desc; | ||
530 | |||
531 | tdc->busy = false; | ||
532 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | ||
533 | dma_desc = sgreq->dma_desc; | ||
534 | dma_desc->bytes_transferred += sgreq->req_len; | ||
535 | |||
536 | list_del(&sgreq->node); | ||
537 | if (sgreq->last_sg) { | ||
538 | dma_desc->dma_status = DMA_SUCCESS; | ||
539 | dma_cookie_complete(&dma_desc->txd); | ||
540 | if (!dma_desc->cb_count) | ||
541 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | ||
542 | dma_desc->cb_count++; | ||
543 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
544 | } | ||
545 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
546 | |||
547 | /* Do not start DMA if it is going to be terminated */ | ||
548 | if (to_terminate || list_empty(&tdc->pending_sg_req)) | ||
549 | return; | ||
550 | |||
551 | tdc_start_head_req(tdc); | ||
552 | return; | ||
553 | } | ||
554 | |||
555 | static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | ||
556 | bool to_terminate) | ||
557 | { | ||
558 | struct tegra_dma_sg_req *sgreq; | ||
559 | struct tegra_dma_desc *dma_desc; | ||
560 | bool st; | ||
561 | |||
562 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | ||
563 | dma_desc = sgreq->dma_desc; | ||
564 | dma_desc->bytes_transferred += sgreq->req_len; | ||
565 | |||
566 | /* Callback needs to be called */ | ||
567 | if (!dma_desc->cb_count) | ||
568 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | ||
569 | dma_desc->cb_count++; | ||
570 | |||
571 | /* If not last req then put at end of pending list */ | ||
572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { | ||
573 | list_move_tail(&sgreq->node, &tdc->pending_sg_req); | ||
574 | sgreq->configured = false; | ||
575 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); | ||
576 | if (!st) | ||
577 | dma_desc->dma_status = DMA_ERROR; | ||
578 | } | ||
579 | return; | ||
580 | } | ||
581 | |||
582 | static void tegra_dma_tasklet(unsigned long data) | ||
583 | { | ||
584 | struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; | ||
585 | dma_async_tx_callback callback = NULL; | ||
586 | void *callback_param = NULL; | ||
587 | struct tegra_dma_desc *dma_desc; | ||
588 | unsigned long flags; | ||
589 | int cb_count; | ||
590 | |||
591 | spin_lock_irqsave(&tdc->lock, flags); | ||
592 | while (!list_empty(&tdc->cb_desc)) { | ||
593 | dma_desc = list_first_entry(&tdc->cb_desc, | ||
594 | typeof(*dma_desc), cb_node); | ||
595 | list_del(&dma_desc->cb_node); | ||
596 | callback = dma_desc->txd.callback; | ||
597 | callback_param = dma_desc->txd.callback_param; | ||
598 | cb_count = dma_desc->cb_count; | ||
599 | dma_desc->cb_count = 0; | ||
600 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
601 | while (cb_count-- && callback) | ||
602 | callback(callback_param); | ||
603 | spin_lock_irqsave(&tdc->lock, flags); | ||
604 | } | ||
605 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
606 | } | ||
607 | |||
608 | static irqreturn_t tegra_dma_isr(int irq, void *dev_id) | ||
609 | { | ||
610 | struct tegra_dma_channel *tdc = dev_id; | ||
611 | unsigned long status; | ||
612 | unsigned long flags; | ||
613 | |||
614 | spin_lock_irqsave(&tdc->lock, flags); | ||
615 | |||
616 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
617 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
618 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | ||
619 | tdc->isr_handler(tdc, false); | ||
620 | tasklet_schedule(&tdc->tasklet); | ||
621 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
622 | return IRQ_HANDLED; | ||
623 | } | ||
624 | |||
625 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
626 | dev_info(tdc2dev(tdc), | ||
627 | "Interrupt already served status 0x%08lx\n", status); | ||
628 | return IRQ_NONE; | ||
629 | } | ||
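The handler above follows the usual top-half/bottom-half split: the hard IRQ handles the hardware status (the driver writes the read-back status value to TEGRA_APBDMA_CHAN_STATUS to clear the EOC condition) and defers the callback work to a tasklet. A minimal, generic sketch of that split (names are illustrative, not this driver's):

#include <linux/interrupt.h>

static void example_bottom_half(unsigned long data)
{
	/* Runs later in softirq context: invoke client callbacks here. */
}

static DECLARE_TASKLET(example_tasklet, example_bottom_half, 0);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* 1. Read the hardware status.
	 * 2. Acknowledge the handled condition in the hardware.
	 * 3. Defer the slow work to the bottom half. */
	tasklet_schedule(&example_tasklet);
	return IRQ_HANDLED;
}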
630 | |||
631 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
632 | { | ||
633 | struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd); | ||
634 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan); | ||
635 | unsigned long flags; | ||
636 | dma_cookie_t cookie; | ||
637 | |||
638 | spin_lock_irqsave(&tdc->lock, flags); | ||
639 | dma_desc->dma_status = DMA_IN_PROGRESS; | ||
640 | cookie = dma_cookie_assign(&dma_desc->txd); | ||
641 | list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req); | ||
642 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
643 | return cookie; | ||
644 | } | ||
645 | |||
646 | static void tegra_dma_issue_pending(struct dma_chan *dc) | ||
647 | { | ||
648 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
649 | unsigned long flags; | ||
650 | |||
651 | spin_lock_irqsave(&tdc->lock, flags); | ||
652 | if (list_empty(&tdc->pending_sg_req)) { | ||
653 | dev_err(tdc2dev(tdc), "No DMA request\n"); | ||
654 | goto end; | ||
655 | } | ||
656 | if (!tdc->busy) { | ||
657 | tdc_start_head_req(tdc); | ||
658 | |||
659 | /* Continuous single mode: Configure next req */ | ||
660 | if (tdc->cyclic) { | ||
661 | /* | ||
662 | * Wait for one burst time before configuring the DMA for the | ||
663 | * next transfer. | ||
664 | */ | ||
665 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
666 | tdc_configure_next_head_desc(tdc); | ||
667 | } | ||
668 | } | ||
669 | end: | ||
670 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
671 | return; | ||
672 | } | ||
673 | |||
674 | static void tegra_dma_terminate_all(struct dma_chan *dc) | ||
675 | { | ||
676 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
677 | struct tegra_dma_sg_req *sgreq; | ||
678 | struct tegra_dma_desc *dma_desc; | ||
679 | unsigned long flags; | ||
680 | unsigned long status; | ||
681 | bool was_busy; | ||
682 | |||
683 | spin_lock_irqsave(&tdc->lock, flags); | ||
684 | if (list_empty(&tdc->pending_sg_req)) { | ||
685 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
686 | return; | ||
687 | } | ||
688 | |||
689 | if (!tdc->busy) | ||
690 | goto skip_dma_stop; | ||
691 | |||
692 | /* Pause DMA before checking the queue status */ | ||
693 | tegra_dma_global_pause(tdc, true); | ||
694 | |||
695 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
696 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
697 | dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__); | ||
698 | tdc->isr_handler(tdc, true); | ||
699 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
700 | } | ||
701 | |||
702 | was_busy = tdc->busy; | ||
703 | tegra_dma_stop(tdc); | ||
704 | |||
705 | if (!list_empty(&tdc->pending_sg_req) && was_busy) { | ||
706 | sgreq = list_first_entry(&tdc->pending_sg_req, | ||
707 | typeof(*sgreq), node); | ||
708 | sgreq->dma_desc->bytes_transferred += | ||
709 | get_current_xferred_count(tdc, sgreq, status); | ||
710 | } | ||
711 | tegra_dma_global_resume(tdc); | ||
712 | |||
713 | skip_dma_stop: | ||
714 | tegra_dma_abort_all(tdc); | ||
715 | |||
716 | while (!list_empty(&tdc->cb_desc)) { | ||
717 | dma_desc = list_first_entry(&tdc->cb_desc, | ||
718 | typeof(*dma_desc), cb_node); | ||
719 | list_del(&dma_desc->cb_node); | ||
720 | dma_desc->cb_count = 0; | ||
721 | } | ||
722 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
723 | } | ||
724 | |||
725 | static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | ||
726 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
727 | { | ||
728 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
729 | struct tegra_dma_desc *dma_desc; | ||
730 | struct tegra_dma_sg_req *sg_req; | ||
731 | enum dma_status ret; | ||
732 | unsigned long flags; | ||
733 | unsigned int residual; | ||
734 | |||
735 | spin_lock_irqsave(&tdc->lock, flags); | ||
736 | |||
737 | ret = dma_cookie_status(dc, cookie, txstate); | ||
738 | if (ret == DMA_SUCCESS) { | ||
739 | dma_set_residue(txstate, 0); | ||
740 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
741 | return ret; | ||
742 | } | ||
743 | |||
744 | /* Check on wait_ack desc status */ | ||
745 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | ||
746 | if (dma_desc->txd.cookie == cookie) { | ||
747 | residual = dma_desc->bytes_requested - | ||
748 | (dma_desc->bytes_transferred % | ||
749 | dma_desc->bytes_requested); | ||
750 | dma_set_residue(txstate, residual); | ||
751 | ret = dma_desc->dma_status; | ||
752 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
753 | return ret; | ||
754 | } | ||
755 | } | ||
756 | |||
757 | /* Check in pending list */ | ||
758 | list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { | ||
759 | dma_desc = sg_req->dma_desc; | ||
760 | if (dma_desc->txd.cookie == cookie) { | ||
761 | residual = dma_desc->bytes_requested - | ||
762 | (dma_desc->bytes_transferred % | ||
763 | dma_desc->bytes_requested); | ||
764 | dma_set_residue(txstate, residual); | ||
765 | ret = dma_desc->dma_status; | ||
766 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
767 | return ret; | ||
768 | } | ||
769 | } | ||
770 | |||
771 | dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); | ||
772 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
773 | return ret; | ||
774 | } | ||
775 | |||
776 | static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, | ||
777 | unsigned long arg) | ||
778 | { | ||
779 | switch (cmd) { | ||
780 | case DMA_SLAVE_CONFIG: | ||
781 | return tegra_dma_slave_config(dc, | ||
782 | (struct dma_slave_config *)arg); | ||
783 | |||
784 | case DMA_TERMINATE_ALL: | ||
785 | tegra_dma_terminate_all(dc); | ||
786 | return 0; | ||
787 | |||
788 | default: | ||
789 | break; | ||
790 | } | ||
791 | |||
792 | return -ENXIO; | ||
793 | } | ||
794 | |||
795 | static inline int get_bus_width(struct tegra_dma_channel *tdc, | ||
796 | enum dma_slave_buswidth slave_bw) | ||
797 | { | ||
798 | switch (slave_bw) { | ||
799 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
800 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8; | ||
801 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
802 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16; | ||
803 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
804 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; | ||
805 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
806 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; | ||
807 | default: | ||
808 | dev_warn(tdc2dev(tdc), | ||
809 | "slave bw is not supported, using 32bits\n"); | ||
810 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | static inline int get_burst_size(struct tegra_dma_channel *tdc, | ||
815 | u32 burst_size, enum dma_slave_buswidth slave_bw, int len) | ||
816 | { | ||
817 | int burst_byte; | ||
818 | int burst_ahb_width; | ||
819 | |||
820 | /* | ||
821 | * The burst_size from the client is in units of the bus_width; | ||
822 | * convert it into the AHB memory width, which is 4 bytes. | ||
823 | */ | ||
824 | burst_byte = burst_size * slave_bw; | ||
825 | burst_ahb_width = burst_byte / 4; | ||
826 | |||
827 | /* If burst size is 0 then calculate the burst size based on length */ | ||
828 | if (!burst_ahb_width) { | ||
829 | if (len & 0xF) | ||
830 | return TEGRA_APBDMA_AHBSEQ_BURST_1; | ||
831 | else if ((len >> 4) & 0x1) | ||
832 | return TEGRA_APBDMA_AHBSEQ_BURST_4; | ||
833 | else | ||
834 | return TEGRA_APBDMA_AHBSEQ_BURST_8; | ||
835 | } | ||
836 | if (burst_ahb_width < 4) | ||
837 | return TEGRA_APBDMA_AHBSEQ_BURST_1; | ||
838 | else if (burst_ahb_width < 8) | ||
839 | return TEGRA_APBDMA_AHBSEQ_BURST_4; | ||
840 | else | ||
841 | return TEGRA_APBDMA_AHBSEQ_BURST_8; | ||
842 | } | ||
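A worked example of the conversion above: a client requesting a burst of 8 words on a 16-bit (2-byte) slave bus yields burst_byte = 16 and burst_ahb_width = 4, which falls in the "< 8" bucket and selects TEGRA_APBDMA_AHBSEQ_BURST_4; with burst_size = 0 and a 256-byte transfer, neither bits 3:0 nor bit 4 of the length are set, so TEGRA_APBDMA_AHBSEQ_BURST_8 is chosen. A standalone re-statement of the same decision logic, illustrative only:

enum example_burst { BURST_1 = 1, BURST_4 = 4, BURST_8 = 8 };

static enum example_burst example_pick_burst(int burst_words, int bus_bytes, int len)
{
	int burst_ahb_words = (burst_words * bus_bytes) / 4; /* AHB bus is 4 bytes wide */

	if (!burst_ahb_words)	/* no hint from the client: derive from length alignment */
		return (len & 0xF) ? BURST_1 :
		       (((len >> 4) & 0x1) ? BURST_4 : BURST_8);
	if (burst_ahb_words < 4)
		return BURST_1;
	if (burst_ahb_words < 8)
		return BURST_4;
	return BURST_8;
}

/* example_pick_burst(8, 2, 64)  -> BURST_4  (16 bytes -> 4 AHB words)  */
/* example_pick_burst(0, 2, 256) -> BURST_8  (256 is 32-byte aligned)   */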
843 | |||
844 | static int get_transfer_param(struct tegra_dma_channel *tdc, | ||
845 | enum dma_transfer_direction direction, unsigned long *apb_addr, | ||
846 | unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, | ||
847 | enum dma_slave_buswidth *slave_bw) | ||
848 | { | ||
849 | |||
850 | switch (direction) { | ||
851 | case DMA_MEM_TO_DEV: | ||
852 | *apb_addr = tdc->dma_sconfig.dst_addr; | ||
853 | *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); | ||
854 | *burst_size = tdc->dma_sconfig.dst_maxburst; | ||
855 | *slave_bw = tdc->dma_sconfig.dst_addr_width; | ||
856 | *csr = TEGRA_APBDMA_CSR_DIR; | ||
857 | return 0; | ||
858 | |||
859 | case DMA_DEV_TO_MEM: | ||
860 | *apb_addr = tdc->dma_sconfig.src_addr; | ||
861 | *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); | ||
862 | *burst_size = tdc->dma_sconfig.src_maxburst; | ||
863 | *slave_bw = tdc->dma_sconfig.src_addr_width; | ||
864 | *csr = 0; | ||
865 | return 0; | ||
866 | |||
867 | default: | ||
868 | dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); | ||
869 | return -EINVAL; | ||
870 | } | ||
871 | return -EINVAL; | ||
872 | } | ||
873 | |||
874 | static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | ||
875 | struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, | ||
876 | enum dma_transfer_direction direction, unsigned long flags, | ||
877 | void *context) | ||
878 | { | ||
879 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
880 | struct tegra_dma_desc *dma_desc; | ||
881 | unsigned int i; | ||
882 | struct scatterlist *sg; | ||
883 | unsigned long csr, ahb_seq, apb_ptr, apb_seq; | ||
884 | struct list_head req_list; | ||
885 | struct tegra_dma_sg_req *sg_req = NULL; | ||
886 | u32 burst_size; | ||
887 | enum dma_slave_buswidth slave_bw; | ||
888 | int ret; | ||
889 | |||
890 | if (!tdc->config_init) { | ||
891 | dev_err(tdc2dev(tdc), "dma channel is not configured\n"); | ||
892 | return NULL; | ||
893 | } | ||
894 | if (sg_len < 1) { | ||
895 | dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); | ||
896 | return NULL; | ||
897 | } | ||
898 | |||
899 | ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, | ||
900 | &burst_size, &slave_bw); | ||
901 | if (ret < 0) | ||
902 | return NULL; | ||
903 | |||
904 | INIT_LIST_HEAD(&req_list); | ||
905 | |||
906 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; | ||
907 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << | ||
908 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; | ||
909 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; | ||
910 | |||
911 | csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; | ||
912 | csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | ||
913 | if (flags & DMA_PREP_INTERRUPT) | ||
914 | csr |= TEGRA_APBDMA_CSR_IE_EOC; | ||
915 | |||
916 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | ||
917 | |||
918 | dma_desc = tegra_dma_desc_get(tdc); | ||
919 | if (!dma_desc) { | ||
920 | dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); | ||
921 | return NULL; | ||
922 | } | ||
923 | INIT_LIST_HEAD(&dma_desc->tx_list); | ||
924 | INIT_LIST_HEAD(&dma_desc->cb_node); | ||
925 | dma_desc->cb_count = 0; | ||
926 | dma_desc->bytes_requested = 0; | ||
927 | dma_desc->bytes_transferred = 0; | ||
928 | dma_desc->dma_status = DMA_IN_PROGRESS; | ||
929 | |||
930 | /* Make transfer requests */ | ||
931 | for_each_sg(sgl, sg, sg_len, i) { | ||
932 | u32 len, mem; | ||
933 | |||
934 | mem = sg_dma_address(sg); | ||
935 | len = sg_dma_len(sg); | ||
936 | |||
937 | if ((len & 3) || (mem & 3) || | ||
938 | (len > tdc->tdma->chip_data->max_dma_count)) { | ||
939 | dev_err(tdc2dev(tdc), | ||
940 | "Dma length/memory address is not supported\n"); | ||
941 | tegra_dma_desc_put(tdc, dma_desc); | ||
942 | return NULL; | ||
943 | } | ||
944 | |||
945 | sg_req = tegra_dma_sg_req_get(tdc); | ||
946 | if (!sg_req) { | ||
947 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | ||
948 | tegra_dma_desc_put(tdc, dma_desc); | ||
949 | return NULL; | ||
950 | } | ||
951 | |||
952 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); | ||
953 | dma_desc->bytes_requested += len; | ||
954 | |||
955 | sg_req->ch_regs.apb_ptr = apb_ptr; | ||
956 | sg_req->ch_regs.ahb_ptr = mem; | ||
957 | sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); | ||
958 | sg_req->ch_regs.apb_seq = apb_seq; | ||
959 | sg_req->ch_regs.ahb_seq = ahb_seq; | ||
960 | sg_req->configured = false; | ||
961 | sg_req->last_sg = false; | ||
962 | sg_req->dma_desc = dma_desc; | ||
963 | sg_req->req_len = len; | ||
964 | |||
965 | list_add_tail(&sg_req->node, &dma_desc->tx_list); | ||
966 | } | ||
967 | sg_req->last_sg = true; | ||
968 | if (flags & DMA_CTRL_ACK) | ||
969 | dma_desc->txd.flags = DMA_CTRL_ACK; | ||
970 | |||
971 | /* | ||
972 | * Make sure that the mode does not conflict with the currently | ||
973 | * configured mode. | ||
974 | */ | ||
975 | if (!tdc->isr_handler) { | ||
976 | tdc->isr_handler = handle_once_dma_done; | ||
977 | tdc->cyclic = false; | ||
978 | } else { | ||
979 | if (tdc->cyclic) { | ||
980 | dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n"); | ||
981 | tegra_dma_desc_put(tdc, dma_desc); | ||
982 | return NULL; | ||
983 | } | ||
984 | } | ||
985 | |||
986 | return &dma_desc->txd; | ||
987 | } | ||
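For orientation, tegra_dma_prep_slave_sg() above is the driver-side half of the dmaengine slave API; a client peripheral driver would sit on top of it roughly as sketched below. This is a hedged, minimal outline (error handling trimmed, the FIFO address and burst values are hypothetical), using only generic dmaengine calls rather than anything specific to this controller:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Assumes 'sgl'/'nents' describe a buffer already mapped with dma_map_sg(). */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,		/* peripheral FIFO (hypothetical) */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
		.slave_id	= 0,			/* APB requester select, see CSR_REQ_SEL */
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = NULL;			/* or a completion callback */
	dmaengine_submit(desc);			/* queue on the channel */
	dma_async_issue_pending(chan);		/* kicks tegra_dma_issue_pending() */
	return 0;
}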
988 | |||
989 | struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | ||
990 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | ||
991 | size_t period_len, enum dma_transfer_direction direction, | ||
992 | unsigned long flags, void *context) | ||
993 | { | ||
994 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
995 | struct tegra_dma_desc *dma_desc = NULL; | ||
996 | struct tegra_dma_sg_req *sg_req = NULL; | ||
997 | unsigned long csr, ahb_seq, apb_ptr, apb_seq; | ||
998 | int len; | ||
999 | size_t remain_len; | ||
1000 | dma_addr_t mem = buf_addr; | ||
1001 | u32 burst_size; | ||
1002 | enum dma_slave_buswidth slave_bw; | ||
1003 | int ret; | ||
1004 | |||
1005 | if (!buf_len || !period_len) { | ||
1006 | dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); | ||
1007 | return NULL; | ||
1008 | } | ||
1009 | |||
1010 | if (!tdc->config_init) { | ||
1011 | dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); | ||
1012 | return NULL; | ||
1013 | } | ||
1014 | |||
1015 | /* | ||
1016 | * More requests can be queued as long as the DMA has not been | ||
1017 | * started. The driver will loop over all requests. | ||
1018 | * Once the DMA has started, new requests can be queued only after | ||
1019 | * terminating the DMA. | ||
1020 | */ | ||
1021 | if (tdc->busy) { | ||
1022 | dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); | ||
1023 | return NULL; | ||
1024 | } | ||
1025 | |||
1026 | /* | ||
1027 | * We only support cyclic transfers when buf_len is a multiple of | ||
1028 | * period_len. | ||
1029 | */ | ||
1030 | if (buf_len % period_len) { | ||
1031 | dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); | ||
1032 | return NULL; | ||
1033 | } | ||
1034 | |||
1035 | len = period_len; | ||
1036 | if ((len & 3) || (buf_addr & 3) || | ||
1037 | (len > tdc->tdma->chip_data->max_dma_count)) { | ||
1038 | dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); | ||
1039 | return NULL; | ||
1040 | } | ||
1041 | |||
1042 | ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, | ||
1043 | &burst_size, &slave_bw); | ||
1044 | if (ret < 0) | ||
1045 | return NULL; | ||
1046 | |||
1047 | |||
1048 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; | ||
1049 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << | ||
1050 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; | ||
1051 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; | ||
1052 | |||
1053 | csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; | ||
1054 | csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | ||
1055 | |||
1056 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | ||
1057 | |||
1058 | dma_desc = tegra_dma_desc_get(tdc); | ||
1059 | if (!dma_desc) { | ||
1060 | dev_err(tdc2dev(tdc), "not enough descriptors available\n"); | ||
1061 | return NULL; | ||
1062 | } | ||
1063 | |||
1064 | INIT_LIST_HEAD(&dma_desc->tx_list); | ||
1065 | INIT_LIST_HEAD(&dma_desc->cb_node); | ||
1066 | dma_desc->cb_count = 0; | ||
1067 | |||
1068 | dma_desc->bytes_transferred = 0; | ||
1069 | dma_desc->bytes_requested = buf_len; | ||
1070 | remain_len = buf_len; | ||
1071 | |||
1072 | /* Split transfer equal to period size */ | ||
1073 | while (remain_len) { | ||
1074 | sg_req = tegra_dma_sg_req_get(tdc); | ||
1075 | if (!sg_req) { | ||
1076 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | ||
1077 | tegra_dma_desc_put(tdc, dma_desc); | ||
1078 | return NULL; | ||
1079 | } | ||
1080 | |||
1081 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); | ||
1082 | sg_req->ch_regs.apb_ptr = apb_ptr; | ||
1083 | sg_req->ch_regs.ahb_ptr = mem; | ||
1084 | sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); | ||
1085 | sg_req->ch_regs.apb_seq = apb_seq; | ||
1086 | sg_req->ch_regs.ahb_seq = ahb_seq; | ||
1087 | sg_req->configured = false; | ||
1088 | sg_req->half_done = false; | ||
1089 | sg_req->last_sg = false; | ||
1090 | sg_req->dma_desc = dma_desc; | ||
1091 | sg_req->req_len = len; | ||
1092 | |||
1093 | list_add_tail(&sg_req->node, &dma_desc->tx_list); | ||
1094 | remain_len -= len; | ||
1095 | mem += len; | ||
1096 | } | ||
1097 | sg_req->last_sg = true; | ||
1098 | dma_desc->txd.flags = 0; | ||
1099 | |||
1100 | /* | ||
1101 | * Make sure that the mode does not conflict with the currently | ||
1102 | * configured mode. | ||
1103 | */ | ||
1104 | if (!tdc->isr_handler) { | ||
1105 | tdc->isr_handler = handle_cont_sngl_cycle_dma_done; | ||
1106 | tdc->cyclic = true; | ||
1107 | } else { | ||
1108 | if (!tdc->cyclic) { | ||
1109 | dev_err(tdc2dev(tdc), "DMA configuration conflict\n"); | ||
1110 | tegra_dma_desc_put(tdc, dma_desc); | ||
1111 | return NULL; | ||
1112 | } | ||
1113 | } | ||
1114 | |||
1115 | return &dma_desc->txd; | ||
1116 | } | ||
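The cyclic path above enforces buf_len % period_len == 0 and then splits the buffer into one sg_req per period, each programmed with a word count of (period_len - 4) & 0xFFFC. A small hedged sketch of that validation and split arithmetic (standalone, names are illustrative):

#include <linux/types.h>

/* Returns the number of periods, or 0 if the request would be rejected. */
static unsigned int example_cyclic_periods(size_t buf_len, size_t period_len,
					   dma_addr_t buf, size_t max_dma_count)
{
	if (!buf_len || !period_len)		/* empty request */
		return 0;
	if (buf_len % period_len)		/* must divide evenly */
		return 0;
	if ((period_len & 3) || (buf & 3))	/* word alignment required */
		return 0;
	if (period_len > max_dma_count)		/* per-request HW limit */
		return 0;

	return buf_len / period_len;		/* one sg_req per period */
}

/* e.g. a 4 KiB ring with 1 KiB periods -> 4 sg_reqs, word count 0x3FC each */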
1117 | |||
1118 | static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) | ||
1119 | { | ||
1120 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
1121 | struct tegra_dma *tdma = tdc->tdma; | ||
1122 | int ret; | ||
1123 | |||
1124 | dma_cookie_init(&tdc->dma_chan); | ||
1125 | tdc->config_init = false; | ||
1126 | ret = clk_prepare_enable(tdma->dma_clk); | ||
1127 | if (ret < 0) | ||
1128 | dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); | ||
1129 | return ret; | ||
1130 | } | ||
1131 | |||
1132 | static void tegra_dma_free_chan_resources(struct dma_chan *dc) | ||
1133 | { | ||
1134 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
1135 | struct tegra_dma *tdma = tdc->tdma; | ||
1136 | |||
1137 | struct tegra_dma_desc *dma_desc; | ||
1138 | struct tegra_dma_sg_req *sg_req; | ||
1139 | struct list_head dma_desc_list; | ||
1140 | struct list_head sg_req_list; | ||
1141 | unsigned long flags; | ||
1142 | |||
1143 | INIT_LIST_HEAD(&dma_desc_list); | ||
1144 | INIT_LIST_HEAD(&sg_req_list); | ||
1145 | |||
1146 | dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); | ||
1147 | |||
1148 | if (tdc->busy) | ||
1149 | tegra_dma_terminate_all(dc); | ||
1150 | |||
1151 | spin_lock_irqsave(&tdc->lock, flags); | ||
1152 | list_splice_init(&tdc->pending_sg_req, &sg_req_list); | ||
1153 | list_splice_init(&tdc->free_sg_req, &sg_req_list); | ||
1154 | list_splice_init(&tdc->free_dma_desc, &dma_desc_list); | ||
1155 | INIT_LIST_HEAD(&tdc->cb_desc); | ||
1156 | tdc->config_init = false; | ||
1157 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
1158 | |||
1159 | while (!list_empty(&dma_desc_list)) { | ||
1160 | dma_desc = list_first_entry(&dma_desc_list, | ||
1161 | typeof(*dma_desc), node); | ||
1162 | list_del(&dma_desc->node); | ||
1163 | kfree(dma_desc); | ||
1164 | } | ||
1165 | |||
1166 | while (!list_empty(&sg_req_list)) { | ||
1167 | sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node); | ||
1168 | list_del(&sg_req->node); | ||
1169 | kfree(sg_req); | ||
1170 | } | ||
1171 | clk_disable_unprepare(tdma->dma_clk); | ||
1172 | } | ||
1173 | |||
1174 | /* Tegra20 specific DMA controller information */ | ||
1175 | static const struct tegra_dma_chip_data tegra20_dma_chip_data = { | ||
1176 | .nr_channels = 16, | ||
1177 | .max_dma_count = 1024UL * 64, | ||
1178 | }; | ||
1179 | |||
1180 | #if defined(CONFIG_OF) | ||
1181 | /* Tegra30 specific DMA controller information */ | ||
1182 | static const struct tegra_dma_chip_data tegra30_dma_chip_data = { | ||
1183 | .nr_channels = 32, | ||
1184 | .max_dma_count = 1024UL * 64, | ||
1185 | }; | ||
1186 | |||
1187 | static const struct of_device_id tegra_dma_of_match[] = { | ||
1188 | { | ||
1189 | .compatible = "nvidia,tegra30-apbdma", | ||
1190 | .data = &tegra30_dma_chip_data, | ||
1191 | }, { | ||
1192 | .compatible = "nvidia,tegra20-apbdma", | ||
1193 | .data = &tegra20_dma_chip_data, | ||
1194 | }, { | ||
1195 | }, | ||
1196 | }; | ||
1197 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); | ||
1198 | #endif | ||
1199 | |||
1200 | static int tegra_dma_probe(struct platform_device *pdev) | ||
1201 | { | ||
1202 | struct resource *res; | ||
1203 | struct tegra_dma *tdma; | ||
1204 | int ret; | ||
1205 | int i; | ||
1206 | const struct tegra_dma_chip_data *cdata = NULL; | ||
1207 | |||
1208 | if (pdev->dev.of_node) { | ||
1209 | const struct of_device_id *match; | ||
1210 | match = of_match_device(of_match_ptr(tegra_dma_of_match), | ||
1211 | &pdev->dev); | ||
1212 | if (!match) { | ||
1213 | dev_err(&pdev->dev, "Error: No device match found\n"); | ||
1214 | return -ENODEV; | ||
1215 | } | ||
1216 | cdata = match->data; | ||
1217 | } else { | ||
1218 | /* If no device tree then fallback to tegra20 */ | ||
1219 | cdata = &tegra20_dma_chip_data; | ||
1220 | } | ||
1221 | |||
1222 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | ||
1223 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | ||
1224 | if (!tdma) { | ||
1225 | dev_err(&pdev->dev, "Error: memory allocation failed\n"); | ||
1226 | return -ENOMEM; | ||
1227 | } | ||
1228 | |||
1229 | tdma->dev = &pdev->dev; | ||
1230 | tdma->chip_data = cdata; | ||
1231 | platform_set_drvdata(pdev, tdma); | ||
1232 | |||
1233 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1234 | if (!res) { | ||
1235 | dev_err(&pdev->dev, "No mem resource for DMA\n"); | ||
1236 | return -EINVAL; | ||
1237 | } | ||
1238 | |||
1239 | tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res); | ||
1240 | if (!tdma->base_addr) { | ||
1241 | dev_err(&pdev->dev, | ||
1242 | "Cannot request memregion/iomap dma address\n"); | ||
1243 | return -EADDRNOTAVAIL; | ||
1244 | } | ||
1245 | |||
1246 | tdma->dma_clk = devm_clk_get(&pdev->dev, NULL); | ||
1247 | if (IS_ERR(tdma->dma_clk)) { | ||
1248 | dev_err(&pdev->dev, "Error: Missing controller clock\n"); | ||
1249 | return PTR_ERR(tdma->dma_clk); | ||
1250 | } | ||
1251 | |||
1252 | spin_lock_init(&tdma->global_lock); | ||
1253 | |||
1254 | pm_runtime_enable(&pdev->dev); | ||
1255 | if (!pm_runtime_enabled(&pdev->dev)) { | ||
1256 | ret = tegra_dma_runtime_resume(&pdev->dev); | ||
1257 | if (ret) { | ||
1258 | dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", | ||
1259 | ret); | ||
1260 | goto err_pm_disable; | ||
1261 | } | ||
1262 | } | ||
1263 | |||
1264 | /* Enable clock before accessing registers */ | ||
1265 | ret = clk_prepare_enable(tdma->dma_clk); | ||
1266 | if (ret < 0) { | ||
1267 | dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); | ||
1268 | goto err_pm_disable; | ||
1269 | } | ||
1270 | |||
1271 | /* Reset DMA controller */ | ||
1272 | tegra_periph_reset_assert(tdma->dma_clk); | ||
1273 | udelay(2); | ||
1274 | tegra_periph_reset_deassert(tdma->dma_clk); | ||
1275 | |||
1276 | /* Enable global DMA registers */ | ||
1277 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); | ||
1278 | tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); | ||
1279 | tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); | ||
1280 | |||
1281 | clk_disable_unprepare(tdma->dma_clk); | ||
1282 | |||
1283 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | ||
1284 | for (i = 0; i < cdata->nr_channels; i++) { | ||
1285 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1286 | |||
1287 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + | ||
1288 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; | ||
1289 | |||
1290 | res = platform_get_resource(pdev, IORESOURCE_IRQ, i); | ||
1291 | if (!res) { | ||
1292 | ret = -EINVAL; | ||
1293 | dev_err(&pdev->dev, "No irq resource for chan %d\n", i); | ||
1294 | goto err_irq; | ||
1295 | } | ||
1296 | tdc->irq = res->start; | ||
1297 | snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); | ||
1298 | ret = devm_request_irq(&pdev->dev, tdc->irq, | ||
1299 | tegra_dma_isr, 0, tdc->name, tdc); | ||
1300 | if (ret) { | ||
1301 | dev_err(&pdev->dev, | ||
1302 | "request_irq failed with err %d channel %d\n", | ||
1303 | ret, i); | ||
1304 | goto err_irq; | ||
1305 | } | ||
1306 | |||
1307 | tdc->dma_chan.device = &tdma->dma_dev; | ||
1308 | dma_cookie_init(&tdc->dma_chan); | ||
1309 | list_add_tail(&tdc->dma_chan.device_node, | ||
1310 | &tdma->dma_dev.channels); | ||
1311 | tdc->tdma = tdma; | ||
1312 | tdc->id = i; | ||
1313 | |||
1314 | tasklet_init(&tdc->tasklet, tegra_dma_tasklet, | ||
1315 | (unsigned long)tdc); | ||
1316 | spin_lock_init(&tdc->lock); | ||
1317 | |||
1318 | INIT_LIST_HEAD(&tdc->pending_sg_req); | ||
1319 | INIT_LIST_HEAD(&tdc->free_sg_req); | ||
1320 | INIT_LIST_HEAD(&tdc->free_dma_desc); | ||
1321 | INIT_LIST_HEAD(&tdc->cb_desc); | ||
1322 | } | ||
1323 | |||
1324 | dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); | ||
1325 | dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); | ||
1326 | dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); | ||
1327 | |||
1328 | tdma->dma_dev.dev = &pdev->dev; | ||
1329 | tdma->dma_dev.device_alloc_chan_resources = | ||
1330 | tegra_dma_alloc_chan_resources; | ||
1331 | tdma->dma_dev.device_free_chan_resources = | ||
1332 | tegra_dma_free_chan_resources; | ||
1333 | tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; | ||
1334 | tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; | ||
1335 | tdma->dma_dev.device_control = tegra_dma_device_control; | ||
1336 | tdma->dma_dev.device_tx_status = tegra_dma_tx_status; | ||
1337 | tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; | ||
1338 | |||
1339 | ret = dma_async_device_register(&tdma->dma_dev); | ||
1340 | if (ret < 0) { | ||
1341 | dev_err(&pdev->dev, | ||
1342 | "Tegra20 APB DMA driver registration failed %d\n", ret); | ||
1343 | goto err_irq; | ||
1344 | } | ||
1345 | |||
1346 | dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n", | ||
1347 | cdata->nr_channels); | ||
1348 | return 0; | ||
1349 | |||
1350 | err_irq: | ||
1351 | while (--i >= 0) { | ||
1352 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1353 | tasklet_kill(&tdc->tasklet); | ||
1354 | } | ||
1355 | |||
1356 | err_pm_disable: | ||
1357 | pm_runtime_disable(&pdev->dev); | ||
1358 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1359 | tegra_dma_runtime_suspend(&pdev->dev); | ||
1360 | return ret; | ||
1361 | } | ||
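The probe above sizes a single devm_kzalloc() for the controller plus all of its channels, so none of the error paths have to free per-channel memory. A minimal sketch of that layout, assuming (as the tdma->channels[i] accesses suggest) that the channel array is a flexible array member at the end of struct tegra_dma; the example_* names are illustrative:

#include <linux/device.h>
#include <linux/slab.h>

struct example_chan {
	int id;
	/* ... per-channel state ... */
};

struct example_dma {
	int nr_channels;
	struct example_chan channels[];	/* channel storage follows the header */
};

static struct example_dma *example_alloc(struct device *dev, int nr)
{
	struct example_dma *d;

	/* one managed allocation: header + nr channel slots */
	d = devm_kzalloc(dev, sizeof(*d) + nr * sizeof(struct example_chan),
			 GFP_KERNEL);
	if (!d)
		return NULL;
	d->nr_channels = nr;
	return d;
}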
1362 | |||
1363 | static int tegra_dma_remove(struct platform_device *pdev) | ||
1364 | { | ||
1365 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1366 | int i; | ||
1367 | struct tegra_dma_channel *tdc; | ||
1368 | |||
1369 | dma_async_device_unregister(&tdma->dma_dev); | ||
1370 | |||
1371 | for (i = 0; i < tdma->chip_data->nr_channels; ++i) { | ||
1372 | tdc = &tdma->channels[i]; | ||
1373 | tasklet_kill(&tdc->tasklet); | ||
1374 | } | ||
1375 | |||
1376 | pm_runtime_disable(&pdev->dev); | ||
1377 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1378 | tegra_dma_runtime_suspend(&pdev->dev); | ||
1379 | |||
1380 | return 0; | ||
1381 | } | ||
1382 | |||
1383 | static int tegra_dma_runtime_suspend(struct device *dev) | ||
1384 | { | ||
1385 | struct platform_device *pdev = to_platform_device(dev); | ||
1386 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1387 | |||
1388 | clk_disable_unprepare(tdma->dma_clk); | ||
1389 | return 0; | ||
1390 | } | ||
1391 | |||
1392 | static int tegra_dma_runtime_resume(struct device *dev) | ||
1393 | { | ||
1394 | struct platform_device *pdev = to_platform_device(dev); | ||
1395 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1396 | int ret; | ||
1397 | |||
1398 | ret = clk_prepare_enable(tdma->dma_clk); | ||
1399 | if (ret < 0) { | ||
1400 | dev_err(dev, "clk_enable failed: %d\n", ret); | ||
1401 | return ret; | ||
1402 | } | ||
1403 | return 0; | ||
1404 | } | ||
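The runtime_suspend/runtime_resume pair above only gates and ungates the controller clock; nothing in this hunk shows what triggers them. A hedged sketch of the usual counterpart in a channel-allocation path (the example_* name is illustrative, the pm_runtime_* calls are the standard API):

#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>

static int example_alloc_chan_resources(struct dma_chan *chan)
{
	struct device *dev = chan->device->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);	/* ungates the clock via runtime_resume */
	if (ret < 0)
		return ret;

	/* ... channel register setup would go here ... */

	pm_runtime_put(dev);		/* lets runtime_suspend gate it again */
	return 0;
}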
1405 | |||
1406 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { | ||
1407 | #ifdef CONFIG_PM_RUNTIME | ||
1408 | .runtime_suspend = tegra_dma_runtime_suspend, | ||
1409 | .runtime_resume = tegra_dma_runtime_resume, | ||
1410 | #endif | ||
1411 | }; | ||
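An equivalent, arguably tidier spelling of the #ifdef CONFIG_PM_RUNTIME block above uses the SET_RUNTIME_PM_OPS() helper from <linux/pm.h>, which compiles to nothing when runtime PM is disabled. This is a sketch for comparison, not code from the patch:

#include <linux/pm.h>

static const struct dev_pm_ops example_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend,
			   tegra_dma_runtime_resume, NULL)
};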
1412 | |||
1413 | static struct platform_driver tegra_dmac_driver = { | ||
1414 | .driver = { | ||
1415 | .name = "tegra-apbdma", | ||
1416 | .owner = THIS_MODULE, | ||
1417 | .pm = &tegra_dma_dev_pm_ops, | ||
1418 | .of_match_table = of_match_ptr(tegra_dma_of_match), | ||
1419 | }, | ||
1420 | .probe = tegra_dma_probe, | ||
1421 | .remove = tegra_dma_remove, | ||
1422 | }; | ||
1423 | |||
1424 | module_platform_driver(tegra_dmac_driver); | ||
1425 | |||
1426 | MODULE_ALIAS("platform:tegra20-apbdma"); | ||
1427 | MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver"); | ||
1428 | MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); | ||
1429 | MODULE_LICENSE("GPL v2"); | ||
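For context, a hedged sketch of how a peripheral driver of this era consumes the DMA_SLAVE capability registered by the controller above; the ex_* name and the burst/width values are illustrative, while the dmaengine_* calls are the standard client API:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int ex_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,		/* peripheral FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);	/* runs the driver's tx_submit */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* device_issue_pending callback */
	return 0;
}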
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 952f823901a..f69f90a6187 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -31,8 +31,6 @@ | |||
31 | 31 | ||
32 | #include <linux/timb_dma.h> | 32 | #include <linux/timb_dma.h> |
33 | 33 | ||
34 | #include "dmaengine.h" | ||
35 | |||
36 | #define DRIVER_NAME "timb-dma" | 34 | #define DRIVER_NAME "timb-dma" |
37 | 35 | ||
38 | /* Global DMA registers */ | 36 | /* Global DMA registers */ |
@@ -86,12 +84,13 @@ struct timb_dma_chan { | |||
86 | especially the lists and descriptors, | 84 | especially the lists and descriptors, |
87 | from races between the tasklet and calls | 85 | from races between the tasklet and calls |
88 | from above */ | 86 | from above */ |
87 | dma_cookie_t last_completed_cookie; | ||
89 | bool ongoing; | 88 | bool ongoing; |
90 | struct list_head active_list; | 89 | struct list_head active_list; |
91 | struct list_head queue; | 90 | struct list_head queue; |
92 | struct list_head free_list; | 91 | struct list_head free_list; |
93 | unsigned int bytes_per_line; | 92 | unsigned int bytes_per_line; |
94 | enum dma_transfer_direction direction; | 93 | enum dma_data_direction direction; |
95 | unsigned int descs; /* Descriptors to allocate */ | 94 | unsigned int descs; /* Descriptors to allocate */ |
96 | unsigned int desc_elems; /* number of elems per descriptor */ | 95 | unsigned int desc_elems; /* number of elems per descriptor */ |
97 | }; | 96 | }; |
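This hunk swaps the channel's direction field from enum dma_transfer_direction back to enum dma_data_direction. In trees where both enums exist they describe different things: the slave transfer direction versus the DMA-API mapping ownership. A hedged helper sketch of the hand-written mapping several drivers used (not part of either driver):

#include <linux/dmaengine.h>
#include <linux/dma-direction.h>

static enum dma_data_direction ex_xfer_to_map_dir(enum dma_transfer_direction d)
{
	/* MEM_TO_DEV reads a CPU-written buffer; DEV_TO_MEM is the reverse */
	return d == DMA_MEM_TO_DEV ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}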
@@ -167,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, | |||
167 | 166 | ||
168 | if (single) | 167 | if (single) |
169 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, | 168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, |
170 | DMA_TO_DEVICE); | 169 | td_chan->direction); |
171 | else | 170 | else |
172 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, | 171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, |
173 | DMA_TO_DEVICE); | 172 | td_chan->direction); |
174 | } | 173 | } |
175 | 174 | ||
176 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) | 175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) |
@@ -236,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan) | |||
236 | "td_chan: %p, chan: %d, membase: %p\n", | 235 | "td_chan: %p, chan: %d, membase: %p\n", |
237 | td_chan, td_chan->chan.chan_id, td_chan->membase); | 236 | td_chan, td_chan->chan.chan_id, td_chan->membase); |
238 | 237 | ||
239 | if (td_chan->direction == DMA_DEV_TO_MEM) { | 238 | if (td_chan->direction == DMA_FROM_DEVICE) { |
240 | 239 | ||
241 | /* descriptor address */ | 240 | /* descriptor address */ |
242 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); | 241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); |
@@ -279,13 +278,13 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
279 | txd->cookie); | 278 | txd->cookie); |
280 | 279 | ||
281 | /* make sure to stop the transfer */ | 280 | /* make sure to stop the transfer */ |
282 | if (td_chan->direction == DMA_DEV_TO_MEM) | 281 | if (td_chan->direction == DMA_FROM_DEVICE) |
283 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); | 282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); |
284 | /* Currently no support for stopping DMA transfers | 283 | /* Currently no support for stopping DMA transfers |
285 | else | 284 | else |
286 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); | 285 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); |
287 | */ | 286 | */ |
288 | dma_cookie_complete(txd); | 287 | td_chan->last_completed_cookie = txd->cookie; |
289 | td_chan->ongoing = false; | 288 | td_chan->ongoing = false; |
290 | 289 | ||
291 | callback = txd->callback; | 290 | callback = txd->callback; |
@@ -350,7 +349,12 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) | |||
350 | dma_cookie_t cookie; | 349 | dma_cookie_t cookie; |
351 | 350 | ||
352 | spin_lock_bh(&td_chan->lock); | 351 | spin_lock_bh(&td_chan->lock); |
353 | cookie = dma_cookie_assign(txd); | 352 | |
353 | cookie = txd->chan->cookie; | ||
354 | if (++cookie < 0) | ||
355 | cookie = 1; | ||
356 | txd->chan->cookie = cookie; | ||
357 | txd->cookie = cookie; | ||
354 | 358 | ||
355 | if (list_empty(&td_chan->active_list)) { | 359 | if (list_empty(&td_chan->active_list)) { |
356 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, | 360 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, |
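The open-coded increment above is, as far as this hunk shows, what the removed dma_cookie_assign() helper did for the driver; a sketch reconstructed from it (the ex_* name is illustrative):

#include <linux/dmaengine.h>

static dma_cookie_t ex_cookie_assign(struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie = txd->chan->cookie;

	if (++cookie < 0)		/* cookies stay positive; wrap back to 1 */
		cookie = 1;
	txd->chan->cookie = cookie;	/* last cookie issued on this channel */
	txd->cookie = cookie;		/* cookie identifying this descriptor */

	return cookie;
}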
@@ -477,7 +481,8 @@ static int td_alloc_chan_resources(struct dma_chan *chan) | |||
477 | } | 481 | } |
478 | 482 | ||
479 | spin_lock_bh(&td_chan->lock); | 483 | spin_lock_bh(&td_chan->lock); |
480 | dma_cookie_init(chan); | 484 | td_chan->last_completed_cookie = 1; |
485 | chan->cookie = 1; | ||
481 | spin_unlock_bh(&td_chan->lock); | 486 | spin_unlock_bh(&td_chan->lock); |
482 | 487 | ||
483 | return 0; | 488 | return 0; |
@@ -510,13 +515,24 @@ static void td_free_chan_resources(struct dma_chan *chan) | |||
510 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 515 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
511 | struct dma_tx_state *txstate) | 516 | struct dma_tx_state *txstate) |
512 | { | 517 | { |
513 | enum dma_status ret; | 518 | struct timb_dma_chan *td_chan = |
519 | container_of(chan, struct timb_dma_chan, chan); | ||
520 | dma_cookie_t last_used; | ||
521 | dma_cookie_t last_complete; | ||
522 | int ret; | ||
514 | 523 | ||
515 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 524 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
516 | 525 | ||
517 | ret = dma_cookie_status(chan, cookie, txstate); | 526 | last_complete = td_chan->last_completed_cookie; |
527 | last_used = chan->cookie; | ||
528 | |||
529 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
530 | |||
531 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
518 | 532 | ||
519 | dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret); | 533 | dev_dbg(chan2dev(chan), |
534 | "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", | ||
535 | __func__, ret, last_complete, last_used); | ||
520 | 536 | ||
521 | return ret; | 537 | return ret; |
522 | } | 538 | } |
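Either form of td_tx_status() is consumed the same way by clients; a hedged sketch of the usual polling pattern (the example_* name is illustrative, DMA_SUCCESS being the completion status of this era):

#include <linux/dmaengine.h>

static bool example_transfer_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	/* state.residue is the byte count left; td_tx_status() always reports 0 */
	return status == DMA_SUCCESS;
}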
@@ -542,8 +558,7 @@ static void td_issue_pending(struct dma_chan *chan) | |||
542 | 558 | ||
543 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | 559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, |
544 | struct scatterlist *sgl, unsigned int sg_len, | 560 | struct scatterlist *sgl, unsigned int sg_len, |
545 | enum dma_transfer_direction direction, unsigned long flags, | 561 | enum dma_data_direction direction, unsigned long flags) |
546 | void *context) | ||
547 | { | 562 | { |
548 | struct timb_dma_chan *td_chan = | 563 | struct timb_dma_chan *td_chan = |
549 | container_of(chan, struct timb_dma_chan, chan); | 564 | container_of(chan, struct timb_dma_chan, chan); |
@@ -591,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | |||
591 | } | 606 | } |
592 | 607 | ||
593 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, | 608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, |
594 | td_desc->desc_list_len, DMA_MEM_TO_DEV); | 609 | td_desc->desc_list_len, DMA_TO_DEVICE); |
595 | 610 | ||
596 | return &td_desc->txd; | 611 | return &td_desc->txd; |
597 | } | 612 | } |
@@ -667,7 +682,7 @@ static irqreturn_t td_irq(int irq, void *devid) | |||
667 | } | 682 | } |
668 | 683 | ||
669 | 684 | ||
670 | static int td_probe(struct platform_device *pdev) | 685 | static int __devinit td_probe(struct platform_device *pdev) |
671 | { | 686 | { |
672 | struct timb_dma_platform_data *pdata = pdev->dev.platform_data; | 687 | struct timb_dma_platform_data *pdata = pdev->dev.platform_data; |
673 | struct timb_dma *td; | 688 | struct timb_dma *td; |
@@ -738,7 +753,7 @@ static int td_probe(struct platform_device *pdev) | |||
738 | 753 | ||
739 | INIT_LIST_HEAD(&td->dma.channels); | 754 | INIT_LIST_HEAD(&td->dma.channels); |
740 | 755 | ||
741 | for (i = 0; i < pdata->nr_channels; i++) { | 756 | for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { |
742 | struct timb_dma_chan *td_chan = &td->channels[i]; | 757 | struct timb_dma_chan *td_chan = &td->channels[i]; |
743 | struct timb_dma_platform_data_channel *pchan = | 758 | struct timb_dma_platform_data_channel *pchan = |
744 | pdata->channels + i; | 759 | pdata->channels + i; |
@@ -747,11 +762,12 @@ static int td_probe(struct platform_device *pdev) | |||
747 | if ((i % 2) == pchan->rx) { | 762 | if ((i % 2) == pchan->rx) { |
748 | dev_err(&pdev->dev, "Wrong channel configuration\n"); | 763 | dev_err(&pdev->dev, "Wrong channel configuration\n"); |
749 | err = -EINVAL; | 764 | err = -EINVAL; |
750 | goto err_free_irq; | 765 | goto err_tasklet_kill; |
751 | } | 766 | } |
752 | 767 | ||
753 | td_chan->chan.device = &td->dma; | 768 | td_chan->chan.device = &td->dma; |
754 | dma_cookie_init(&td_chan->chan); | 769 | td_chan->chan.cookie = 1; |
770 | td_chan->chan.chan_id = i; | ||
755 | spin_lock_init(&td_chan->lock); | 771 | spin_lock_init(&td_chan->lock); |
756 | INIT_LIST_HEAD(&td_chan->active_list); | 772 | INIT_LIST_HEAD(&td_chan->active_list); |
757 | INIT_LIST_HEAD(&td_chan->queue); | 773 | INIT_LIST_HEAD(&td_chan->queue); |
@@ -760,8 +776,8 @@ static int td_probe(struct platform_device *pdev) | |||
760 | td_chan->descs = pchan->descriptors; | 776 | td_chan->descs = pchan->descriptors; |
761 | td_chan->desc_elems = pchan->descriptor_elements; | 777 | td_chan->desc_elems = pchan->descriptor_elements; |
762 | td_chan->bytes_per_line = pchan->bytes_per_line; | 778 | td_chan->bytes_per_line = pchan->bytes_per_line; |
763 | td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM : | 779 | td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : |
764 | DMA_MEM_TO_DEV; | 780 | DMA_TO_DEVICE; |
765 | 781 | ||
766 | td_chan->membase = td->membase + | 782 | td_chan->membase = td->membase + |
767 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + | 783 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + |
@@ -798,7 +814,7 @@ err_release_region: | |||
798 | 814 | ||
799 | } | 815 | } |
800 | 816 | ||
801 | static int td_remove(struct platform_device *pdev) | 817 | static int __devexit td_remove(struct platform_device *pdev) |
802 | { | 818 | { |
803 | struct timb_dma *td = platform_get_drvdata(pdev); | 819 | struct timb_dma *td = platform_get_drvdata(pdev); |
804 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 820 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -826,7 +842,17 @@ static struct platform_driver td_driver = { | |||
826 | .remove = __exit_p(td_remove), | 842 | .remove = __exit_p(td_remove), |
827 | }; | 843 | }; |
828 | 844 | ||
829 | module_platform_driver(td_driver); | 845 | static int __init td_init(void) |
846 | { | ||
847 | return platform_driver_register(&td_driver); | ||
848 | } | ||
849 | module_init(td_init); | ||
850 | |||
851 | static void __exit td_exit(void) | ||
852 | { | ||
853 | platform_driver_unregister(&td_driver); | ||
854 | } | ||
855 | module_exit(td_exit); | ||
830 | 856 | ||
831 | MODULE_LICENSE("GPL v2"); | 857 | MODULE_LICENSE("GPL v2"); |
832 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); | 858 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); |
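The module_platform_driver() one-liner removed in the last hunk is only shorthand for the init/exit pair now written out by hand; its approximate definition in <linux/platform_device.h> of this era, quoted here for reference:

#define module_platform_driver(__platform_driver) \
	module_driver(__platform_driver, platform_driver_register, \
			platform_driver_unregister)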
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 913f55c76c9..cbd83e362b5 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -15,8 +15,6 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | |||
19 | #include "dmaengine.h" | ||
20 | #include "txx9dmac.h" | 18 | #include "txx9dmac.h" |
21 | 19 | ||
22 | static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) | 20 | static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) |
@@ -281,6 +279,21 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc, | |||
281 | } | 279 | } |
282 | } | 280 | } |
283 | 281 | ||
282 | /* Called with dc->lock held and bh disabled */ | ||
283 | static dma_cookie_t | ||
284 | txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) | ||
285 | { | ||
286 | dma_cookie_t cookie = dc->chan.cookie; | ||
287 | |||
288 | if (++cookie < 0) | ||
289 | cookie = 1; | ||
290 | |||
291 | dc->chan.cookie = cookie; | ||
292 | desc->txd.cookie = cookie; | ||
293 | |||
294 | return cookie; | ||
295 | } | ||
296 | |||
284 | /*----------------------------------------------------------------------*/ | 297 | /*----------------------------------------------------------------------*/ |
285 | 298 | ||
286 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) | 299 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) |
@@ -411,7 +424,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
411 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", | 424 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", |
412 | txd->cookie, desc); | 425 | txd->cookie, desc); |
413 | 426 | ||
414 | dma_cookie_complete(txd); | 427 | dc->completed = txd->cookie; |
415 | callback = txd->callback; | 428 | callback = txd->callback; |
416 | param = txd->callback_param; | 429 | param = txd->callback_param; |
417 | 430 | ||
@@ -725,7 +738,7 @@ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) | |||
725 | dma_cookie_t cookie; | 738 | dma_cookie_t cookie; |
726 | 739 | ||
727 | spin_lock_bh(&dc->lock); | 740 | spin_lock_bh(&dc->lock); |
728 | cookie = dma_cookie_assign(tx); | 741 | cookie = txx9dmac_assign_cookie(dc, desc); |
729 | 742 | ||
730 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", | 743 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", |
731 | desc->txd.cookie, desc); | 744 | desc->txd.cookie, desc); |
@@ -832,8 +845,8 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
832 | 845 | ||
833 | static struct dma_async_tx_descriptor * | 846 | static struct dma_async_tx_descriptor * |
834 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
835 | unsigned int sg_len, enum dma_transfer_direction direction, | 848 | unsigned int sg_len, enum dma_data_direction direction, |
836 | unsigned long flags, void *context) | 849 | unsigned long flags) |
837 | { | 850 | { |
838 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
839 | struct txx9dmac_dev *ddev = dc->ddev; | 852 | struct txx9dmac_dev *ddev = dc->ddev; |
@@ -847,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
847 | 860 | ||
848 | BUG_ON(!ds || !ds->reg_width); | 861 | BUG_ON(!ds || !ds->reg_width); |
849 | if (ds->tx_reg) | 862 | if (ds->tx_reg) |
850 | BUG_ON(direction != DMA_MEM_TO_DEV); | 863 | BUG_ON(direction != DMA_TO_DEVICE); |
851 | else | 864 | else |
852 | BUG_ON(direction != DMA_DEV_TO_MEM); | 865 | BUG_ON(direction != DMA_FROM_DEVICE); |
853 | if (unlikely(!sg_len)) | 866 | if (unlikely(!sg_len)) |
854 | return NULL; | 867 | return NULL; |
855 | 868 | ||
@@ -869,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
869 | mem = sg_dma_address(sg); | 882 | mem = sg_dma_address(sg); |
870 | 883 | ||
871 | if (__is_dmac64(ddev)) { | 884 | if (__is_dmac64(ddev)) { |
872 | if (direction == DMA_MEM_TO_DEV) { | 885 | if (direction == DMA_TO_DEVICE) { |
873 | desc->hwdesc.SAR = mem; | 886 | desc->hwdesc.SAR = mem; |
874 | desc->hwdesc.DAR = ds->tx_reg; | 887 | desc->hwdesc.DAR = ds->tx_reg; |
875 | } else { | 888 | } else { |
@@ -878,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
878 | } | 891 | } |
879 | desc->hwdesc.CNTR = sg_dma_len(sg); | 892 | desc->hwdesc.CNTR = sg_dma_len(sg); |
880 | } else { | 893 | } else { |
881 | if (direction == DMA_MEM_TO_DEV) { | 894 | if (direction == DMA_TO_DEVICE) { |
882 | desc->hwdesc32.SAR = mem; | 895 | desc->hwdesc32.SAR = mem; |
883 | desc->hwdesc32.DAR = ds->tx_reg; | 896 | desc->hwdesc32.DAR = ds->tx_reg; |
884 | } else { | 897 | } else { |
@@ -887,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
887 | } | 900 | } |
888 | desc->hwdesc32.CNTR = sg_dma_len(sg); | 901 | desc->hwdesc32.CNTR = sg_dma_len(sg); |
889 | } | 902 | } |
890 | if (direction == DMA_MEM_TO_DEV) { | 903 | if (direction == DMA_TO_DEVICE) { |
891 | sai = ds->reg_width; | 904 | sai = ds->reg_width; |
892 | dai = 0; | 905 | dai = 0; |
893 | } else { | 906 | } else { |
@@ -959,17 +972,27 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
959 | struct dma_tx_state *txstate) | 972 | struct dma_tx_state *txstate) |
960 | { | 973 | { |
961 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 974 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
962 | enum dma_status ret; | 975 | dma_cookie_t last_used; |
976 | dma_cookie_t last_complete; | ||
977 | int ret; | ||
963 | 978 | ||
964 | ret = dma_cookie_status(chan, cookie, txstate); | 979 | last_complete = dc->completed; |
980 | last_used = chan->cookie; | ||
981 | |||
982 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
965 | if (ret != DMA_SUCCESS) { | 983 | if (ret != DMA_SUCCESS) { |
966 | spin_lock_bh(&dc->lock); | 984 | spin_lock_bh(&dc->lock); |
967 | txx9dmac_scan_descriptors(dc); | 985 | txx9dmac_scan_descriptors(dc); |
968 | spin_unlock_bh(&dc->lock); | 986 | spin_unlock_bh(&dc->lock); |
969 | 987 | ||
970 | ret = dma_cookie_status(chan, cookie, txstate); | 988 | last_complete = dc->completed; |
989 | last_used = chan->cookie; | ||
990 | |||
991 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
971 | } | 992 | } |
972 | 993 | ||
994 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
995 | |||
973 | return ret; | 996 | return ret; |
974 | } | 997 | } |
975 | 998 | ||
@@ -1034,7 +1057,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) | |||
1034 | return -EIO; | 1057 | return -EIO; |
1035 | } | 1058 | } |
1036 | 1059 | ||
1037 | dma_cookie_init(chan); | 1060 | dc->completed = chan->cookie = 1; |
1038 | 1061 | ||
1039 | dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; | 1062 | dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; |
1040 | txx9dmac_chan_set_SMPCHN(dc); | 1063 | txx9dmac_chan_set_SMPCHN(dc); |
@@ -1163,7 +1186,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) | |||
1163 | dc->ddev->chan[ch] = dc; | 1186 | dc->ddev->chan[ch] = dc; |
1164 | dc->chan.device = &dc->dma; | 1187 | dc->chan.device = &dc->dma; |
1165 | list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); | 1188 | list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); |
1166 | dma_cookie_init(&dc->chan); | 1189 | dc->chan.cookie = dc->completed = 1; |
1167 | 1190 | ||
1168 | if (is_dmac64(dc)) | 1191 | if (is_dmac64(dc)) |
1169 | dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; | 1192 | dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; |
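Both hunks above replace dma_cookie_init() with direct assignments; as the replacements suggest, the helper only seeds the channel's issued and completed cookies, roughly as sketched below (completed_cookie lives in struct dma_chan on the side of the diff that still uses the helper):

#include <linux/dmaengine.h>

static void ex_cookie_init(struct dma_chan *chan)
{
	chan->cookie = 1;		/* next cookie to hand out */
	chan->completed_cookie = 1;	/* nothing has completed yet */
}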
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h index f5a76059888..365d42366b9 100644 --- a/drivers/dma/txx9dmac.h +++ b/drivers/dma/txx9dmac.h | |||
@@ -172,6 +172,7 @@ struct txx9dmac_chan { | |||
172 | spinlock_t lock; | 172 | spinlock_t lock; |
173 | 173 | ||
174 | /* these other elements are all protected by lock */ | 174 | /* these other elements are all protected by lock */ |
175 | dma_cookie_t completed; | ||
175 | struct list_head active_list; | 176 | struct list_head active_list; |
176 | struct list_head queue; | 177 | struct list_head queue; |
177 | struct list_head free_list; | 178 | struct list_head free_list; |
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c deleted file mode 100644 index 6f80432a3f0..00000000000 --- a/drivers/dma/virt-dma.c +++ /dev/null | |||
@@ -1,123 +0,0 @@ | |||
1 | /* | ||
2 | * Virtual DMA channel support for DMAengine | ||
3 | * | ||
4 | * Copyright (C) 2012 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/dmaengine.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | |||
15 | #include "virt-dma.h" | ||
16 | |||
17 | static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) | ||
18 | { | ||
19 | return container_of(tx, struct virt_dma_desc, tx); | ||
20 | } | ||
21 | |||
22 | dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | ||
23 | { | ||
24 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | ||
25 | struct virt_dma_desc *vd = to_virt_desc(tx); | ||
26 | unsigned long flags; | ||
27 | dma_cookie_t cookie; | ||
28 | |||
29 | spin_lock_irqsave(&vc->lock, flags); | ||
30 | cookie = dma_cookie_assign(tx); | ||
31 | |||
32 | list_add_tail(&vd->node, &vc->desc_submitted); | ||
33 | spin_unlock_irqrestore(&vc->lock, flags); | ||
34 | |||
35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", | ||
36 | vc, vd, cookie); | ||
37 | |||
38 | return cookie; | ||
39 | } | ||
40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); | ||
41 | |||
42 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, | ||
43 | dma_cookie_t cookie) | ||
44 | { | ||
45 | struct virt_dma_desc *vd; | ||
46 | |||
47 | list_for_each_entry(vd, &vc->desc_issued, node) | ||
48 | if (vd->tx.cookie == cookie) | ||
49 | return vd; | ||
50 | |||
51 | return NULL; | ||
52 | } | ||
53 | EXPORT_SYMBOL_GPL(vchan_find_desc); | ||
54 | |||
55 | /* | ||
56 | * This tasklet handles the completion of a DMA descriptor by | ||
57 | * calling its callback and freeing it. | ||
58 | */ | ||
59 | static void vchan_complete(unsigned long arg) | ||
60 | { | ||
61 | struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; | ||
62 | struct virt_dma_desc *vd; | ||
63 | dma_async_tx_callback cb = NULL; | ||
64 | void *cb_data = NULL; | ||
65 | LIST_HEAD(head); | ||
66 | |||
67 | spin_lock_irq(&vc->lock); | ||
68 | list_splice_tail_init(&vc->desc_completed, &head); | ||
69 | vd = vc->cyclic; | ||
70 | if (vd) { | ||
71 | vc->cyclic = NULL; | ||
72 | cb = vd->tx.callback; | ||
73 | cb_data = vd->tx.callback_param; | ||
74 | } | ||
75 | spin_unlock_irq(&vc->lock); | ||
76 | |||
77 | if (cb) | ||
78 | cb(cb_data); | ||
79 | |||
80 | while (!list_empty(&head)) { | ||
81 | vd = list_first_entry(&head, struct virt_dma_desc, node); | ||
82 | cb = vd->tx.callback; | ||
83 | cb_data = vd->tx.callback_param; | ||
84 | |||
85 | list_del(&vd->node); | ||
86 | |||
87 | vc->desc_free(vd); | ||
88 | |||
89 | if (cb) | ||
90 | cb(cb_data); | ||
91 | } | ||
92 | } | ||
93 | |||
94 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) | ||
95 | { | ||
96 | while (!list_empty(head)) { | ||
97 | struct virt_dma_desc *vd = list_first_entry(head, | ||
98 | struct virt_dma_desc, node); | ||
99 | list_del(&vd->node); | ||
100 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | ||
101 | vc->desc_free(vd); | ||
102 | } | ||
103 | } | ||
104 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); | ||
105 | |||
106 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) | ||
107 | { | ||
108 | dma_cookie_init(&vc->chan); | ||
109 | |||
110 | spin_lock_init(&vc->lock); | ||
111 | INIT_LIST_HEAD(&vc->desc_submitted); | ||
112 | INIT_LIST_HEAD(&vc->desc_issued); | ||
113 | INIT_LIST_HEAD(&vc->desc_completed); | ||
114 | |||
115 | tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); | ||
116 | |||
117 | vc->chan.device = dmadev; | ||
118 | list_add_tail(&vc->chan.device_node, &dmadev->channels); | ||
119 | } | ||
120 | EXPORT_SYMBOL_GPL(vchan_init); | ||
121 | |||
122 | MODULE_AUTHOR("Russell King"); | ||
123 | MODULE_LICENSE("GPL"); | ||
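For reference, a hedged sketch of how a driver sat on top of the virt-dma layer deleted above: embed the virt_dma_chan/virt_dma_desc structs, register with vchan_init(), hand descriptors out through vchan_tx_prep(), and complete them from the interrupt handler with vchan_cookie_complete(). All my_* names are illustrative; the vchan_* signatures are taken from the removed file itself:

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

struct my_desc {
	struct virt_dma_desc vd;	/* recovered again via container_of() */
	/* ... hardware descriptor state ... */
};

struct my_chan {
	struct virt_dma_chan vc;
	struct my_desc *active;
};

static void my_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct my_desc, vd));
}

static struct dma_async_tx_descriptor *my_prep(struct dma_chan *chan,
					       unsigned long flags)
{
	struct my_chan *c = container_of(chan, struct my_chan, vc.chan);
	struct my_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;
	/* initializes vd->tx and points its tx_submit at vchan_tx_submit() */
	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_chan *c = data;

	spin_lock(&c->vc.lock);
	if (c->active) {
		/* marks the cookie complete and schedules vc.task */
		vchan_cookie_complete(&c->active->vd);
		c->active = NULL;
	}
	spin_unlock(&c->vc.lock);
	return IRQ_HANDLED;
}

static void my_chan_register(struct my_chan *c, struct dma_device *dd)
{
	c->vc.desc_free = my_desc_free;
	vchan_init(&c->vc, dd);		/* adds vc.chan to dd->channels */
}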
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h deleted file mode 100644 index 85c19d63f9f..00000000000 --- a/drivers/dma/virt-dma.h +++ /dev/null | |||
@@ -1,152 +0,0 @@ | |||
1 | /* | ||
2 | * Virtual DMA channel support for DMAengine | ||
3 | * | ||
4 | * Copyright (C) 2012 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef VIRT_DMA_H | ||
11 | #define VIRT_DMA_H | ||
12 | |||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | |||
16 | #include "dmaengine.h" | ||
17 | |||
18 | struct virt_dma_desc { | ||
19 | struct dma_async_tx_descriptor tx; | ||
20 | /* protected by vc.lock */ | ||
21 | struct list_head node; | ||
22 | }; | ||
23 | |||
24 | struct virt_dma_chan { | ||
25 | struct dma_chan chan; | ||
26 | struct tasklet_struct task; | ||
27 | void (*desc_free)(struct virt_dma_desc *); | ||
28 | |||
29 | spinlock_t lock; | ||
30 | |||
31 | /* protected by vc.lock */ | ||
32 | struct list_head desc_submitted; | ||
33 | struct list_head desc_issued; | ||
34 | struct list_head desc_completed; | ||
35 | |||
36 | struct virt_dma_desc *cyclic; | ||
37 | }; | ||
38 | |||
39 | static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) | ||
40 | { | ||
41 | return container_of(chan, struct virt_dma_chan, chan); | ||
42 | } | ||
43 | |||
44 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); | ||
45 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); | ||
46 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); | ||
47 | |||
48 | /** | ||
49 | * vchan_tx_prep - prepare a descriptor | ||
50 | * vc: virtual channel allocating this descriptor | ||
51 | * vd: virtual descriptor to prepare | ||
52 | * tx_flags: flags argument passed in to prepare function | ||
53 | */ | ||
54 | static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, | ||
55 | struct virt_dma_desc *vd, unsigned long tx_flags) | ||
56 | { | ||
57 | extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); | ||
58 | |||
59 | dma_async_tx_descriptor_init(&vd->tx, &vc->chan); | ||
60 | vd->tx.flags = tx_flags; | ||
61 | vd->tx.tx_submit = vchan_tx_submit; | ||
62 | |||
63 | return &vd->tx; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * vchan_issue_pending - move submitted descriptors to issued list | ||
68 | * vc: virtual channel to update | ||
69 | * | ||
70 | * vc.lock must be held by caller | ||
71 | */ | ||
72 | static inline bool vchan_issue_pending(struct virt_dma_chan *vc) | ||
73 | { | ||
74 | list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); | ||
75 | return !list_empty(&vc->desc_issued); | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * vchan_cookie_complete - report completion of a descriptor | ||
80 | * vd: virtual descriptor to update | ||
81 | * | ||
82 | * vc.lock must be held by caller | ||
83 | */ | ||
84 | static inline void vchan_cookie_complete(struct virt_dma_desc *vd) | ||
85 | { | ||
86 | struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); | ||
87 | |||
88 | dma_cookie_complete(&vd->tx); | ||
89 | dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", | ||
90 | vd, vd->tx.cookie); | ||
91 | list_add_tail(&vd->node, &vc->desc_completed); | ||
92 | |||
93 | tasklet_schedule(&vc->task); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * vchan_cyclic_callback - report the completion of a period | ||
98 | * vd: virtual descriptor | ||
99 | */ | ||
100 | static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) | ||
101 | { | ||
102 | struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); | ||
103 | |||
104 | vc->cyclic = vd; | ||
105 | tasklet_schedule(&vc->task); | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * vchan_next_desc - peek at the next descriptor to be processed | ||
110 | * vc: virtual channel to obtain descriptor from | ||
111 | * | ||
112 | * vc.lock must be held by caller | ||
113 | */ | ||
114 | static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) | ||
115 | { | ||
116 | if (list_empty(&vc->desc_issued)) | ||
117 | return NULL; | ||
118 | |||
119 | return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * vchan_get_all_descriptors - obtain all submitted and issued descriptors | ||
124 | * vc: virtual channel to get descriptors from | ||
125 | * head: list of descriptors found | ||
126 | * | ||
127 | * vc.lock must be held by caller | ||
128 | * | ||
129 | * Removes all submitted and issued descriptors from internal lists, and | ||
130 | * provides a list of all descriptors found | ||
131 | */ | ||
132 | static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, | ||
133 | struct list_head *head) | ||
134 | { | ||
135 | list_splice_tail_init(&vc->desc_submitted, head); | ||
136 | list_splice_tail_init(&vc->desc_issued, head); | ||
137 | list_splice_tail_init(&vc->desc_completed, head); | ||
138 | } | ||
139 | |||
140 | static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | LIST_HEAD(head); | ||
144 | |||
145 | spin_lock_irqsave(&vc->lock, flags); | ||
146 | vchan_get_all_descriptors(vc, &head); | ||
147 | spin_unlock_irqrestore(&vc->lock, flags); | ||
148 | |||
149 | vchan_dma_desc_free_list(vc, &head); | ||
150 | } | ||
151 | |||
152 | #endif | ||
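A common consumer of the helpers at the bottom of this header was the DMA_TERMINATE_ALL path; a hedged sketch, reusing the illustrative my_chan wrapper from the previous sketch and leaving the hardware stop as a comment:

static int my_terminate_all(struct my_chan *c)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	/* ... halt the hardware channel here ... */
	c->active = NULL;
	vchan_get_all_descriptors(&c->vc, &head);	/* submitted + issued + completed */
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);	/* calls vc.desc_free on each */
	return 0;
}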