-rw-r--r-- | arch/arm/mach-mxs/include/mach/dma.h | 26
-rw-r--r-- | arch/arm/plat-nomadik/include/plat/ste_dma40.h | 22
-rw-r--r-- | arch/avr32/mach-at32ap/at32ap700x.c | 15
-rw-r--r-- | drivers/dma/Kconfig | 12
-rw-r--r-- | drivers/dma/Makefile | 1
-rw-r--r-- | drivers/dma/dmatest.c | 14
-rw-r--r-- | drivers/dma/dw_dmac.c | 103
-rw-r--r-- | drivers/dma/dw_dmac_regs.h | 12
-rw-r--r-- | drivers/dma/fsldma.c | 551
-rw-r--r-- | drivers/dma/fsldma.h | 6
-rw-r--r-- | drivers/dma/mxs-dma.c | 724
-rw-r--r-- | drivers/dma/pch_dma.c | 35
-rw-r--r-- | drivers/dma/ste_dma40.c | 1402
-rw-r--r-- | drivers/dma/ste_dma40_ll.c | 218
-rw-r--r-- | drivers/dma/ste_dma40_ll.h | 66
-rw-r--r-- | include/linux/dw_dmac.h | 44
16 files changed, 1989 insertions, 1262 deletions
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/arch/arm/mach-mxs/include/mach/dma.h
new file mode 100644
index 000000000000..7f4aeeaba8df
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/dma.h
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef __MACH_MXS_DMA_H__ | ||
10 | #define __MACH_MXS_DMA_H__ | ||
11 | |||
12 | struct mxs_dma_data { | ||
13 | int chan_irq; | ||
14 | }; | ||
15 | |||
16 | static inline int mxs_dma_is_apbh(struct dma_chan *chan) | ||
17 | { | ||
18 | return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh"); | ||
19 | } | ||
20 | |||
21 | static inline int mxs_dma_is_apbx(struct dma_chan *chan) | ||
22 | { | ||
23 | return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx"); | ||
24 | } | ||
25 | |||
26 | #endif /* __MACH_MXS_DMA_H__ */ | ||
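A minimal usage sketch, not part of the patch: a client driver might use these helpers inside a dmaengine filter function to pick a channel from the APBH controller, handing its mxs_dma_data to the driver through chan->private (the chan->private convention and the surrounding names are assumptions for illustration).

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool example_mxs_apbh_filter(struct dma_chan *chan, void *param)
{
	struct mxs_dma_data *data = param;

	/* accept only channels provided by the APBH DMA controller */
	if (!mxs_dma_is_apbh(chan))
		return false;

	/* hand the per-channel IRQ over to the mxs-dma driver (assumed convention) */
	chan->private = data;
	return true;
}

static struct dma_chan *example_request_apbh_chan(struct mxs_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_mxs_apbh_filter, data);
}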
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 4d6dd4c39b75..c44886062f8e 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -104,6 +104,8 @@ struct stedma40_half_channel_info { | |||
104 | * | 104 | * |
105 | * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH | 105 | * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH |
106 | * @high_priority: true if high-priority | 106 | * @high_priority: true if high-priority |
107 | * @realtime: true if realtime mode is to be enabled. Only available on DMA40 | ||
108 | * version 3+, i.e DB8500v2+ | ||
107 | * @mode: channel mode: physical, logical, or operation | 109 | * @mode: channel mode: physical, logical, or operation |
108 | * @mode_opt: options for the chosen channel mode | 110 | * @mode_opt: options for the chosen channel mode |
109 | * @src_dev_type: Src device type | 111 | * @src_dev_type: Src device type |
@@ -119,6 +121,7 @@ struct stedma40_half_channel_info { | |||
119 | struct stedma40_chan_cfg { | 121 | struct stedma40_chan_cfg { |
120 | enum stedma40_xfer_dir dir; | 122 | enum stedma40_xfer_dir dir; |
121 | bool high_priority; | 123 | bool high_priority; |
124 | bool realtime; | ||
122 | enum stedma40_mode mode; | 125 | enum stedma40_mode mode; |
123 | enum stedma40_mode_opt mode_opt; | 126 | enum stedma40_mode_opt mode_opt; |
124 | int src_dev_type; | 127 | int src_dev_type; |
@@ -169,25 +172,6 @@ struct stedma40_platform_data { | |||
169 | bool stedma40_filter(struct dma_chan *chan, void *data); | 172 | bool stedma40_filter(struct dma_chan *chan, void *data); |
170 | 173 | ||
171 | /** | 174 | /** |
172 | * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from | ||
173 | * scattergatter lists. | ||
174 | * | ||
175 | * @chan: dmaengine handle | ||
176 | * @sgl_dst: Destination scatter list | ||
177 | * @sgl_src: Source scatter list | ||
178 | * @sgl_len: The length of each scatterlist. Both lists must be of equal length | ||
179 | * and each element must match the corresponding element in the other scatter | ||
180 | * list. | ||
181 | * @flags: is actually enum dma_ctrl_flags. See dmaengine.h | ||
182 | */ | ||
183 | |||
184 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | ||
185 | struct scatterlist *sgl_dst, | ||
186 | struct scatterlist *sgl_src, | ||
187 | unsigned int sgl_len, | ||
188 | unsigned long flags); | ||
189 | |||
190 | /** | ||
191 | * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave | 175 | * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave |
192 | * (=device) | 176 | * (=device) |
193 | * | 177 | * |
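For illustration only, not part of the patch: a channel configuration using the new realtime flag might look like the sketch below. The struct fields are those shown in the hunk above; the enum value names are assumptions based on the same header, and the device-type numbers are placeholders for the real platform definitions.

#include <plat/ste_dma40.h>

static struct stedma40_chan_cfg example_rx_cfg = {
	.dir		= STEDMA40_PERIPH_TO_MEM,
	.high_priority	= true,
	.realtime	= true,		/* honoured only on DMA40 v3+, i.e. DB8500v2+ */
	.mode		= STEDMA40_MODE_LOGICAL,
	.src_dev_type	= 1,		/* placeholder peripheral event line */
	.dst_dev_type	= -1,		/* memory */
};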
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index e67c99945428..bfc9d071db9b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -2048,6 +2048,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2048 | rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; | 2048 | rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; |
2049 | rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); | 2049 | rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); |
2050 | rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | 2050 | rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); |
2051 | rx_dws->src_master = 0; | ||
2052 | rx_dws->dst_master = 1; | ||
2053 | rx_dws->src_msize = DW_DMA_MSIZE_1; | ||
2054 | rx_dws->dst_msize = DW_DMA_MSIZE_1; | ||
2055 | rx_dws->fc = DW_DMA_FC_D_P2M; | ||
2051 | } | 2056 | } |
2052 | 2057 | ||
2053 | /* Check if DMA slave interface for playback should be configured. */ | 2058 | /* Check if DMA slave interface for playback should be configured. */ |
@@ -2056,6 +2061,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2056 | tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; | 2061 | tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; |
2057 | tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); | 2062 | tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); |
2058 | tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | 2063 | tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); |
2064 | tx_dws->src_master = 0; | ||
2065 | tx_dws->dst_master = 1; | ||
2066 | tx_dws->src_msize = DW_DMA_MSIZE_1; | ||
2067 | tx_dws->dst_msize = DW_DMA_MSIZE_1; | ||
2068 | tx_dws->fc = DW_DMA_FC_D_M2P; | ||
2059 | } | 2069 | } |
2060 | 2070 | ||
2061 | if (platform_device_add_data(pdev, data, | 2071 | if (platform_device_add_data(pdev, data, |
@@ -2128,6 +2138,11 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) | |||
2128 | dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; | 2138 | dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; |
2129 | dws->cfg_hi = DWC_CFGH_DST_PER(2); | 2139 | dws->cfg_hi = DWC_CFGH_DST_PER(2); |
2130 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | 2140 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); |
2141 | dws->src_master = 0; | ||
2142 | dws->dst_master = 1; | ||
2143 | dws->src_msize = DW_DMA_MSIZE_1; | ||
2144 | dws->dst_msize = DW_DMA_MSIZE_1; | ||
2145 | dws->fc = DW_DMA_FC_D_M2P; | ||
2131 | 2146 | ||
2132 | if (platform_device_add_data(pdev, data, | 2147 | if (platform_device_add_data(pdev, data, |
2133 | sizeof(struct atmel_abdac_pdata))) | 2148 | sizeof(struct atmel_abdac_pdata))) |
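As a standalone sketch mirroring the board code above (values illustrative; fields such as dma_dev, tx_reg and rx_reg omitted for brevity): each dw_dma_slave a platform hands to the driver is now expected to carry master, burst-size and flow-control settings in addition to the existing register-width and handshake configuration.

#include <linux/dw_dmac.h>

static struct dw_dma_slave example_playback_dws = {
	.reg_width	= DW_DMA_SLAVE_WIDTH_16BIT,
	.cfg_hi		= DWC_CFGH_DST_PER(4),	/* handshake interface 4 */
	.src_master	= 0,			/* fetch data via AHB master 0 */
	.dst_master	= 1,			/* write data via AHB master 1 */
	.src_msize	= DW_DMA_MSIZE_1,
	.dst_msize	= DW_DMA_MSIZE_1,
	.fc		= DW_DMA_FC_D_M2P,	/* DMAC flow control, mem-to-periph */
};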
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152fa..a572600e44eb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA | |||
82 | 82 | ||
83 | config DW_DMAC | 83 | config DW_DMAC |
84 | tristate "Synopsys DesignWare AHB DMA support" | 84 | tristate "Synopsys DesignWare AHB DMA support" |
85 | depends on AVR32 | 85 | depends on HAVE_CLK |
86 | select DMA_ENGINE | 86 | select DMA_ENGINE |
87 | default y if CPU_AT32AP7000 | 87 | default y if CPU_AT32AP7000 |
88 | help | 88 | help |
@@ -221,12 +221,20 @@ config IMX_SDMA | |||
221 | 221 | ||
222 | config IMX_DMA | 222 | config IMX_DMA |
223 | tristate "i.MX DMA support" | 223 | tristate "i.MX DMA support" |
224 | depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27 | 224 | depends on IMX_HAVE_DMA_V1 |
225 | select DMA_ENGINE | 225 | select DMA_ENGINE |
226 | help | 226 | help |
227 | Support the i.MX DMA engine. This engine is integrated into | 227 | Support the i.MX DMA engine. This engine is integrated into |
228 | Freescale i.MX1/21/27 chips. | 228 | Freescale i.MX1/21/27 chips. |
229 | 229 | ||
230 | config MXS_DMA | ||
231 | bool "MXS DMA support" | ||
232 | depends on SOC_IMX23 || SOC_IMX28 | ||
233 | select DMA_ENGINE | ||
234 | help | ||
235 | Support the MXS DMA engine. This engine including APBH-DMA | ||
236 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. | ||
237 | |||
230 | config DMA_ENGINE | 238 | config DMA_ENGINE |
231 | bool | 239 | bool |
232 | 240 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1be065a62f8c..836095ab3c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o | |||
19 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 19 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
20 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 20 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
21 | obj-$(CONFIG_IMX_DMA) += imx-dma.o | 21 | obj-$(CONFIG_IMX_DMA) += imx-dma.o |
22 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o | ||
22 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 23 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
23 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 24 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
24 | obj-$(CONFIG_PL330_DMA) += pl330.o | 25 | obj-$(CONFIG_PL330_DMA) += pl330.o |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684d..e0888cb538d4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO); | |||
54 | MODULE_PARM_DESC(pq_sources, | 54 | MODULE_PARM_DESC(pq_sources, |
55 | "Number of p+q source buffers (default: 3)"); | 55 | "Number of p+q source buffers (default: 3)"); |
56 | 56 | ||
57 | static int timeout = 3000; | ||
58 | module_param(timeout, uint, S_IRUGO); | ||
59 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \ | ||
60 | Pass -1 for infinite timeout"); | ||
61 | |||
57 | /* | 62 | /* |
58 | * Initialization patterns. All bytes in the source buffer has bit 7 | 63 | * Initialization patterns. All bytes in the source buffer has bit 7 |
59 | * set, all bytes in the destination buffer has bit 7 cleared. | 64 | * set, all bytes in the destination buffer has bit 7 cleared. |
@@ -285,7 +290,12 @@ static int dmatest_func(void *data) | |||
285 | 290 | ||
286 | set_user_nice(current, 10); | 291 | set_user_nice(current, 10); |
287 | 292 | ||
288 | flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; | 293 | /* |
294 | * src buffers are freed by the DMAEngine code with dma_unmap_single() | ||
295 | * dst buffers are freed by ourselves below | ||
296 | */ | ||
297 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | ||
298 | | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; | ||
289 | 299 | ||
290 | while (!kthread_should_stop() | 300 | while (!kthread_should_stop() |
291 | && !(iterations && total_tests >= iterations)) { | 301 | && !(iterations && total_tests >= iterations)) { |
@@ -294,7 +304,7 @@ static int dmatest_func(void *data) | |||
294 | dma_addr_t dma_srcs[src_cnt]; | 304 | dma_addr_t dma_srcs[src_cnt]; |
295 | dma_addr_t dma_dsts[dst_cnt]; | 305 | dma_addr_t dma_dsts[dst_cnt]; |
296 | struct completion cmp; | 306 | struct completion cmp; |
297 | unsigned long tmo = msecs_to_jiffies(3000); | 307 | unsigned long tmo = msecs_to_jiffies(timeout); |
298 | u8 align = 0; | 308 | u8 align = 0; |
299 | 309 | ||
300 | total_tests++; | 310 | total_tests++; |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67e..9c25c7d099e4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,26 +32,30 @@ | |||
32 | * which does not support descriptor writeback. | 32 | * which does not support descriptor writeback. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | /* NOTE: DMS+SMS is system-specific. We should get this information | 35 | #define DWC_DEFAULT_CTLLO(private) ({ \ |
36 | * from the platform code somehow. | 36 | struct dw_dma_slave *__slave = (private); \ |
37 | */ | 37 | int dms = __slave ? __slave->dst_master : 0; \ |
38 | #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ | 38 | int sms = __slave ? __slave->src_master : 1; \ |
39 | | DWC_CTLL_SRC_MSIZE(0) \ | 39 | u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ |
40 | | DWC_CTLL_DMS(0) \ | 40 | u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ |
41 | | DWC_CTLL_SMS(1) \ | 41 | \ |
42 | | DWC_CTLL_LLP_D_EN \ | 42 | (DWC_CTLL_DST_MSIZE(dmsize) \ |
43 | | DWC_CTLL_LLP_S_EN) | 43 | | DWC_CTLL_SRC_MSIZE(smsize) \ |
44 | | DWC_CTLL_LLP_D_EN \ | ||
45 | | DWC_CTLL_LLP_S_EN \ | ||
46 | | DWC_CTLL_DMS(dms) \ | ||
47 | | DWC_CTLL_SMS(sms)); \ | ||
48 | }) | ||
44 | 49 | ||
45 | /* | 50 | /* |
46 | * This is configuration-dependent and usually a funny size like 4095. | 51 | * This is configuration-dependent and usually a funny size like 4095. |
47 | * Let's round it down to the nearest power of two. | ||
48 | * | 52 | * |
49 | * Note that this is a transfer count, i.e. if we transfer 32-bit | 53 | * Note that this is a transfer count, i.e. if we transfer 32-bit |
50 | * words, we can do 8192 bytes per descriptor. | 54 | * words, we can do 16380 bytes per descriptor. |
51 | * | 55 | * |
52 | * This parameter is also system-specific. | 56 | * This parameter is also system-specific. |
53 | */ | 57 | */ |
54 | #define DWC_MAX_COUNT 2048U | 58 | #define DWC_MAX_COUNT 4095U |
55 | 59 | ||
56 | /* | 60 | /* |
57 | * Number of descriptors to allocate for each channel. This should be | 61 | * Number of descriptors to allocate for each channel. This should be |
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | |||
84 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | 88 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); |
85 | } | 89 | } |
86 | 90 | ||
87 | static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) | ||
88 | { | ||
89 | return list_entry(dwc->queue.next, struct dw_desc, desc_node); | ||
90 | } | ||
91 | |||
92 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | 91 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
93 | { | 92 | { |
94 | struct dw_desc *desc, *_desc; | 93 | struct dw_desc *desc, *_desc; |
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
201 | dma_async_tx_callback callback; | 200 | dma_async_tx_callback callback; |
202 | void *param; | 201 | void *param; |
203 | struct dma_async_tx_descriptor *txd = &desc->txd; | 202 | struct dma_async_tx_descriptor *txd = &desc->txd; |
203 | struct dw_desc *child; | ||
204 | 204 | ||
205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); | 205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
206 | 206 | ||
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
209 | param = txd->callback_param; | 209 | param = txd->callback_param; |
210 | 210 | ||
211 | dwc_sync_desc_for_cpu(dwc, desc); | 211 | dwc_sync_desc_for_cpu(dwc, desc); |
212 | |||
213 | /* async_tx_ack */ | ||
214 | list_for_each_entry(child, &desc->tx_list, desc_node) | ||
215 | async_tx_ack(&child->txd); | ||
216 | async_tx_ack(&desc->txd); | ||
217 | |||
212 | list_splice_init(&desc->tx_list, &dwc->free_list); | 218 | list_splice_init(&desc->tx_list, &dwc->free_list); |
213 | list_move(&desc->desc_node, &dwc->free_list); | 219 | list_move(&desc->desc_node, &dwc->free_list); |
214 | 220 | ||
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
259 | * Submit queued descriptors ASAP, i.e. before we go through | 265 | * Submit queued descriptors ASAP, i.e. before we go through |
260 | * the completed ones. | 266 | * the completed ones. |
261 | */ | 267 | */ |
262 | if (!list_empty(&dwc->queue)) | ||
263 | dwc_dostart(dwc, dwc_first_queued(dwc)); | ||
264 | list_splice_init(&dwc->active_list, &list); | 268 | list_splice_init(&dwc->active_list, &list); |
265 | list_splice_init(&dwc->queue, &dwc->active_list); | 269 | if (!list_empty(&dwc->queue)) { |
270 | list_move(dwc->queue.next, &dwc->active_list); | ||
271 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
272 | } | ||
266 | 273 | ||
267 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 274 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
268 | dwc_descriptor_complete(dwc, desc); | 275 | dwc_descriptor_complete(dwc, desc); |
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
291 | return; | 298 | return; |
292 | } | 299 | } |
293 | 300 | ||
301 | if (list_empty(&dwc->active_list)) | ||
302 | return; | ||
303 | |||
294 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); | 304 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
295 | 305 | ||
296 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 306 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
319 | cpu_relax(); | 329 | cpu_relax(); |
320 | 330 | ||
321 | if (!list_empty(&dwc->queue)) { | 331 | if (!list_empty(&dwc->queue)) { |
322 | dwc_dostart(dwc, dwc_first_queued(dwc)); | 332 | list_move(dwc->queue.next, &dwc->active_list); |
323 | list_splice_init(&dwc->queue, &dwc->active_list); | 333 | dwc_dostart(dwc, dwc_first_active(dwc)); |
324 | } | 334 | } |
325 | } | 335 | } |
326 | 336 | ||
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
346 | */ | 356 | */ |
347 | bad_desc = dwc_first_active(dwc); | 357 | bad_desc = dwc_first_active(dwc); |
348 | list_del_init(&bad_desc->desc_node); | 358 | list_del_init(&bad_desc->desc_node); |
349 | list_splice_init(&dwc->queue, dwc->active_list.prev); | 359 | list_move(dwc->queue.next, dwc->active_list.prev); |
350 | 360 | ||
351 | /* Clear the error flag and try to restart the controller */ | 361 | /* Clear the error flag and try to restart the controller */ |
352 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 362 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
541 | if (list_empty(&dwc->active_list)) { | 551 | if (list_empty(&dwc->active_list)) { |
542 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 552 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
543 | desc->txd.cookie); | 553 | desc->txd.cookie); |
544 | dwc_dostart(dwc, desc); | ||
545 | list_add_tail(&desc->desc_node, &dwc->active_list); | 554 | list_add_tail(&desc->desc_node, &dwc->active_list); |
555 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
546 | } else { | 556 | } else { |
547 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", | 557 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", |
548 | desc->txd.cookie); | 558 | desc->txd.cookie); |
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
581 | * We can be a lot more clever here, but this should take care | 591 | * We can be a lot more clever here, but this should take care |
582 | * of the most common optimization. | 592 | * of the most common optimization. |
583 | */ | 593 | */ |
584 | if (!((src | dest | len) & 3)) | 594 | if (!((src | dest | len) & 7)) |
595 | src_width = dst_width = 3; | ||
596 | else if (!((src | dest | len) & 3)) | ||
585 | src_width = dst_width = 2; | 597 | src_width = dst_width = 2; |
586 | else if (!((src | dest | len) & 1)) | 598 | else if (!((src | dest | len) & 1)) |
587 | src_width = dst_width = 1; | 599 | src_width = dst_width = 1; |
588 | else | 600 | else |
589 | src_width = dst_width = 0; | 601 | src_width = dst_width = 0; |
590 | 602 | ||
591 | ctllo = DWC_DEFAULT_CTLLO | 603 | ctllo = DWC_DEFAULT_CTLLO(chan->private) |
592 | | DWC_CTLL_DST_WIDTH(dst_width) | 604 | | DWC_CTLL_DST_WIDTH(dst_width) |
593 | | DWC_CTLL_SRC_WIDTH(src_width) | 605 | | DWC_CTLL_SRC_WIDTH(src_width) |
594 | | DWC_CTLL_DST_INC | 606 | | DWC_CTLL_DST_INC |
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
669 | 681 | ||
670 | switch (direction) { | 682 | switch (direction) { |
671 | case DMA_TO_DEVICE: | 683 | case DMA_TO_DEVICE: |
672 | ctllo = (DWC_DEFAULT_CTLLO | 684 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
673 | | DWC_CTLL_DST_WIDTH(reg_width) | 685 | | DWC_CTLL_DST_WIDTH(reg_width) |
674 | | DWC_CTLL_DST_FIX | 686 | | DWC_CTLL_DST_FIX |
675 | | DWC_CTLL_SRC_INC | 687 | | DWC_CTLL_SRC_INC |
676 | | DWC_CTLL_FC_M2P); | 688 | | DWC_CTLL_FC(dws->fc)); |
677 | reg = dws->tx_reg; | 689 | reg = dws->tx_reg; |
678 | for_each_sg(sgl, sg, sg_len, i) { | 690 | for_each_sg(sgl, sg, sg_len, i) { |
679 | struct dw_desc *desc; | 691 | struct dw_desc *desc; |
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
714 | } | 726 | } |
715 | break; | 727 | break; |
716 | case DMA_FROM_DEVICE: | 728 | case DMA_FROM_DEVICE: |
717 | ctllo = (DWC_DEFAULT_CTLLO | 729 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
718 | | DWC_CTLL_SRC_WIDTH(reg_width) | 730 | | DWC_CTLL_SRC_WIDTH(reg_width) |
719 | | DWC_CTLL_DST_INC | 731 | | DWC_CTLL_DST_INC |
720 | | DWC_CTLL_SRC_FIX | 732 | | DWC_CTLL_SRC_FIX |
721 | | DWC_CTLL_FC_P2M); | 733 | | DWC_CTLL_FC(dws->fc)); |
722 | 734 | ||
723 | reg = dws->rx_reg; | 735 | reg = dws->rx_reg; |
724 | for_each_sg(sgl, sg, sg_len, i) { | 736 | for_each_sg(sgl, sg, sg_len, i) { |
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan, | |||
834 | 846 | ||
835 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 847 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
836 | if (ret != DMA_SUCCESS) { | 848 | if (ret != DMA_SUCCESS) { |
849 | spin_lock_bh(&dwc->lock); | ||
837 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 850 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
851 | spin_unlock_bh(&dwc->lock); | ||
838 | 852 | ||
839 | last_complete = dwc->completed; | 853 | last_complete = dwc->completed; |
840 | last_used = chan->cookie; | 854 | last_used = chan->cookie; |
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
889 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | 903 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
890 | 904 | ||
891 | cfghi = dws->cfg_hi; | 905 | cfghi = dws->cfg_hi; |
892 | cfglo = dws->cfg_lo; | 906 | cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; |
893 | } | 907 | } |
908 | |||
909 | cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); | ||
910 | |||
894 | channel_writel(dwc, CFG_LO, cfglo); | 911 | channel_writel(dwc, CFG_LO, cfglo); |
895 | channel_writel(dwc, CFG_HI, cfghi); | 912 | channel_writel(dwc, CFG_HI, cfghi); |
896 | 913 | ||
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1126 | case DMA_TO_DEVICE: | 1143 | case DMA_TO_DEVICE: |
1127 | desc->lli.dar = dws->tx_reg; | 1144 | desc->lli.dar = dws->tx_reg; |
1128 | desc->lli.sar = buf_addr + (period_len * i); | 1145 | desc->lli.sar = buf_addr + (period_len * i); |
1129 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 1146 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
1130 | | DWC_CTLL_DST_WIDTH(reg_width) | 1147 | | DWC_CTLL_DST_WIDTH(reg_width) |
1131 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1148 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1132 | | DWC_CTLL_DST_FIX | 1149 | | DWC_CTLL_DST_FIX |
1133 | | DWC_CTLL_SRC_INC | 1150 | | DWC_CTLL_SRC_INC |
1134 | | DWC_CTLL_FC_M2P | 1151 | | DWC_CTLL_FC(dws->fc) |
1135 | | DWC_CTLL_INT_EN); | 1152 | | DWC_CTLL_INT_EN); |
1136 | break; | 1153 | break; |
1137 | case DMA_FROM_DEVICE: | 1154 | case DMA_FROM_DEVICE: |
1138 | desc->lli.dar = buf_addr + (period_len * i); | 1155 | desc->lli.dar = buf_addr + (period_len * i); |
1139 | desc->lli.sar = dws->rx_reg; | 1156 | desc->lli.sar = dws->rx_reg; |
1140 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 1157 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
1141 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1158 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1142 | | DWC_CTLL_DST_WIDTH(reg_width) | 1159 | | DWC_CTLL_DST_WIDTH(reg_width) |
1143 | | DWC_CTLL_DST_INC | 1160 | | DWC_CTLL_DST_INC |
1144 | | DWC_CTLL_SRC_FIX | 1161 | | DWC_CTLL_SRC_FIX |
1145 | | DWC_CTLL_FC_P2M | 1162 | | DWC_CTLL_FC(dws->fc) |
1146 | | DWC_CTLL_INT_EN); | 1163 | | DWC_CTLL_INT_EN); |
1147 | break; | 1164 | break; |
1148 | default: | 1165 | default: |
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1307 | dwc->chan.device = &dw->dma; | 1324 | dwc->chan.device = &dw->dma; |
1308 | dwc->chan.cookie = dwc->completed = 1; | 1325 | dwc->chan.cookie = dwc->completed = 1; |
1309 | dwc->chan.chan_id = i; | 1326 | dwc->chan.chan_id = i; |
1310 | list_add_tail(&dwc->chan.device_node, &dw->dma.channels); | 1327 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1328 | list_add_tail(&dwc->chan.device_node, | ||
1329 | &dw->dma.channels); | ||
1330 | else | ||
1331 | list_add(&dwc->chan.device_node, &dw->dma.channels); | ||
1332 | |||
1333 | /* 7 is highest priority & 0 is lowest. */ | ||
1334 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | ||
1335 | dwc->priority = 7 - i; | ||
1336 | else | ||
1337 | dwc->priority = i; | ||
1311 | 1338 | ||
1312 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; | 1339 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1313 | spin_lock_init(&dwc->lock); | 1340 | spin_lock_init(&dwc->lock); |
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1335 | 1362 | ||
1336 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 1363 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1337 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1364 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1365 | if (pdata->is_private) | ||
1366 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | ||
1338 | dw->dma.dev = &pdev->dev; | 1367 | dw->dma.dev = &pdev->dev; |
1339 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1368 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1340 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 1369 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
@@ -1447,7 +1476,7 @@ static int __init dw_init(void) | |||
1447 | { | 1476 | { |
1448 | return platform_driver_probe(&dw_driver, dw_probe); | 1477 | return platform_driver_probe(&dw_driver, dw_probe); |
1449 | } | 1478 | } |
1450 | module_init(dw_init); | 1479 | subsys_initcall(dw_init); |
1451 | 1480 | ||
1452 | static void __exit dw_exit(void) | 1481 | static void __exit dw_exit(void) |
1453 | { | 1482 | { |
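The probe changes above read several new platform-data fields; they are declared in <linux/dw_dmac.h>, which this series also modifies but which is not shown here. A hypothetical board file might fill them in as below. The chan_allocation_order, chan_priority and is_private names and constants are taken from the hunks above; nr_channels is assumed to be the pre-existing channel-count field, and all values are purely illustrative.

#include <linux/dw_dmac.h>

static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels		= 8,
	.is_private		= true,				/* sets DMA_PRIVATE on the device */
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,	/* hand out channel 0 first */
	.chan_priority		= CHAN_PRIORITY_ASCENDING,	/* channel 0 gets priority 7 */
};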
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f46..720f821527f8 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs { | |||
86 | #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) | 86 | #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) |
87 | #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ | 87 | #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ |
88 | #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ | 88 | #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ |
89 | #define DWC_CTLL_FC(n) ((n) << 20) | ||
89 | #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ | 90 | #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ |
90 | #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ | 91 | #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ |
91 | #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ | 92 | #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ |
@@ -101,6 +102,8 @@ struct dw_dma_regs { | |||
101 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff | 102 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff |
102 | 103 | ||
103 | /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ | 104 | /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ |
105 | #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ | ||
106 | #define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ | ||
104 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ | 107 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ |
105 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ | 108 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ |
106 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ | 109 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ |
@@ -134,6 +137,7 @@ struct dw_dma_chan { | |||
134 | struct dma_chan chan; | 137 | struct dma_chan chan; |
135 | void __iomem *ch_regs; | 138 | void __iomem *ch_regs; |
136 | u8 mask; | 139 | u8 mask; |
140 | u8 priority; | ||
137 | 141 | ||
138 | spinlock_t lock; | 142 | spinlock_t lock; |
139 | 143 | ||
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc) | |||
155 | } | 159 | } |
156 | 160 | ||
157 | #define channel_readl(dwc, name) \ | 161 | #define channel_readl(dwc, name) \ |
158 | __raw_readl(&(__dwc_regs(dwc)->name)) | 162 | readl(&(__dwc_regs(dwc)->name)) |
159 | #define channel_writel(dwc, name, val) \ | 163 | #define channel_writel(dwc, name, val) \ |
160 | __raw_writel((val), &(__dwc_regs(dwc)->name)) | 164 | writel((val), &(__dwc_regs(dwc)->name)) |
161 | 165 | ||
162 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) | 166 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) |
163 | { | 167 | { |
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) | |||
181 | } | 185 | } |
182 | 186 | ||
183 | #define dma_readl(dw, name) \ | 187 | #define dma_readl(dw, name) \ |
184 | __raw_readl(&(__dw_regs(dw)->name)) | 188 | readl(&(__dw_regs(dw)->name)) |
185 | #define dma_writel(dw, name, val) \ | 189 | #define dma_writel(dw, name, val) \ |
186 | __raw_writel((val), &(__dw_regs(dw)->name)) | 190 | writel((val), &(__dw_regs(dw)->name)) |
187 | 191 | ||
188 | #define channel_set_bit(dw, reg, mask) \ | 192 | #define channel_set_bit(dw, reg, mask) \ |
189 | dma_writel(dw, reg, ((mask) << 8) | (mask)) | 193 | dma_writel(dw, reg, ((mask) << 8) | (mask)) |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e3854a8f0de0..6b396759e7f5 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@ | |||
37 | 37 | ||
38 | #include "fsldma.h" | 38 | #include "fsldma.h" |
39 | 39 | ||
40 | static const char msg_ld_oom[] = "No free memory for link descriptor\n"; | 40 | #define chan_dbg(chan, fmt, arg...) \ |
41 | dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) | ||
42 | #define chan_err(chan, fmt, arg...) \ | ||
43 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) | ||
41 | 44 | ||
42 | static void dma_init(struct fsldma_chan *chan) | 45 | static const char msg_ld_oom[] = "No free memory for link descriptor"; |
43 | { | ||
44 | /* Reset the channel */ | ||
45 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | ||
46 | 46 | ||
47 | switch (chan->feature & FSL_DMA_IP_MASK) { | 47 | /* |
48 | case FSL_DMA_IP_85XX: | 48 | * Register Helpers |
49 | /* Set the channel to below modes: | 49 | */ |
50 | * EIE - Error interrupt enable | ||
51 | * EOSIE - End of segments interrupt enable (basic mode) | ||
52 | * EOLNIE - End of links interrupt enable | ||
53 | * BWC - Bandwidth sharing among channels | ||
54 | */ | ||
55 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC | ||
56 | | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE | ||
57 | | FSL_DMA_MR_EOSIE, 32); | ||
58 | break; | ||
59 | case FSL_DMA_IP_83XX: | ||
60 | /* Set the channel to below modes: | ||
61 | * EOTIE - End-of-transfer interrupt enable | ||
62 | * PRC_RM - PCI read multiple | ||
63 | */ | ||
64 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | ||
65 | | FSL_DMA_MR_PRC_RM, 32); | ||
66 | break; | ||
67 | } | ||
68 | } | ||
69 | 50 | ||
70 | static void set_sr(struct fsldma_chan *chan, u32 val) | 51 | static void set_sr(struct fsldma_chan *chan, u32 val) |
71 | { | 52 | { |
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan) | |||
77 | return DMA_IN(chan, &chan->regs->sr, 32); | 58 | return DMA_IN(chan, &chan->regs->sr, 32); |
78 | } | 59 | } |
79 | 60 | ||
61 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | ||
62 | { | ||
63 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | ||
64 | } | ||
65 | |||
66 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | ||
67 | { | ||
68 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | ||
69 | } | ||
70 | |||
71 | static u32 get_bcr(struct fsldma_chan *chan) | ||
72 | { | ||
73 | return DMA_IN(chan, &chan->regs->bcr, 32); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Descriptor Helpers | ||
78 | */ | ||
79 | |||
80 | static void set_desc_cnt(struct fsldma_chan *chan, | 80 | static void set_desc_cnt(struct fsldma_chan *chan, |
81 | struct fsl_dma_ld_hw *hw, u32 count) | 81 | struct fsl_dma_ld_hw *hw, u32 count) |
82 | { | 82 | { |
83 | hw->count = CPU_TO_DMA(chan, count, 32); | 83 | hw->count = CPU_TO_DMA(chan, count, 32); |
84 | } | 84 | } |
85 | 85 | ||
86 | static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | ||
87 | { | ||
88 | return DMA_TO_CPU(chan, desc->hw.count, 32); | ||
89 | } | ||
90 | |||
86 | static void set_desc_src(struct fsldma_chan *chan, | 91 | static void set_desc_src(struct fsldma_chan *chan, |
87 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | 92 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
88 | { | 93 | { |
89 | u64 snoop_bits; | 94 | u64 snoop_bits; |
90 | 95 | ||
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan, | |||
93 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | 98 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); |
94 | } | 99 | } |
95 | 100 | ||
101 | static dma_addr_t get_desc_src(struct fsldma_chan *chan, | ||
102 | struct fsl_desc_sw *desc) | ||
103 | { | ||
104 | u64 snoop_bits; | ||
105 | |||
106 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
107 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
108 | return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; | ||
109 | } | ||
110 | |||
96 | static void set_desc_dst(struct fsldma_chan *chan, | 111 | static void set_desc_dst(struct fsldma_chan *chan, |
97 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) | 112 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
98 | { | 113 | { |
99 | u64 snoop_bits; | 114 | u64 snoop_bits; |
100 | 115 | ||
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan, | |||
103 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | 118 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); |
104 | } | 119 | } |
105 | 120 | ||
121 | static dma_addr_t get_desc_dst(struct fsldma_chan *chan, | ||
122 | struct fsl_desc_sw *desc) | ||
123 | { | ||
124 | u64 snoop_bits; | ||
125 | |||
126 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
127 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
128 | return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; | ||
129 | } | ||
130 | |||
106 | static void set_desc_next(struct fsldma_chan *chan, | 131 | static void set_desc_next(struct fsldma_chan *chan, |
107 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | 132 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
108 | { | 133 | { |
109 | u64 snoop_bits; | 134 | u64 snoop_bits; |
110 | 135 | ||
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan, | |||
113 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); | 138 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); |
114 | } | 139 | } |
115 | 140 | ||
116 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 141 | static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
117 | { | 142 | { |
118 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 143 | u64 snoop_bits; |
119 | } | ||
120 | 144 | ||
121 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | 145 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) |
122 | { | 146 | ? FSL_DMA_SNEN : 0; |
123 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | ||
124 | } | ||
125 | 147 | ||
126 | static dma_addr_t get_ndar(struct fsldma_chan *chan) | 148 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, |
127 | { | 149 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL |
128 | return DMA_IN(chan, &chan->regs->ndar, 64); | 150 | | snoop_bits, 64); |
129 | } | 151 | } |
130 | 152 | ||
131 | static u32 get_bcr(struct fsldma_chan *chan) | 153 | /* |
154 | * DMA Engine Hardware Control Helpers | ||
155 | */ | ||
156 | |||
157 | static void dma_init(struct fsldma_chan *chan) | ||
132 | { | 158 | { |
133 | return DMA_IN(chan, &chan->regs->bcr, 32); | 159 | /* Reset the channel */ |
160 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | ||
161 | |||
162 | switch (chan->feature & FSL_DMA_IP_MASK) { | ||
163 | case FSL_DMA_IP_85XX: | ||
164 | /* Set the channel to below modes: | ||
165 | * EIE - Error interrupt enable | ||
166 | * EOLNIE - End of links interrupt enable | ||
167 | * BWC - Bandwidth sharing among channels | ||
168 | */ | ||
169 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC | ||
170 | | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); | ||
171 | break; | ||
172 | case FSL_DMA_IP_83XX: | ||
173 | /* Set the channel to below modes: | ||
174 | * EOTIE - End-of-transfer interrupt enable | ||
175 | * PRC_RM - PCI read multiple | ||
176 | */ | ||
177 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | ||
178 | | FSL_DMA_MR_PRC_RM, 32); | ||
179 | break; | ||
180 | } | ||
134 | } | 181 | } |
135 | 182 | ||
136 | static int dma_is_idle(struct fsldma_chan *chan) | 183 | static int dma_is_idle(struct fsldma_chan *chan) |
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan) | |||
139 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | 186 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); |
140 | } | 187 | } |
141 | 188 | ||
189 | /* | ||
190 | * Start the DMA controller | ||
191 | * | ||
192 | * Preconditions: | ||
193 | * - the CDAR register must point to the start descriptor | ||
194 | * - the MRn[CS] bit must be cleared | ||
195 | */ | ||
142 | static void dma_start(struct fsldma_chan *chan) | 196 | static void dma_start(struct fsldma_chan *chan) |
143 | { | 197 | { |
144 | u32 mode; | 198 | u32 mode; |
145 | 199 | ||
146 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 200 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
147 | 201 | ||
148 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | 202 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { |
149 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | 203 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); |
150 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); | 204 | mode |= FSL_DMA_MR_EMP_EN; |
151 | mode |= FSL_DMA_MR_EMP_EN; | 205 | } else { |
152 | } else { | 206 | mode &= ~FSL_DMA_MR_EMP_EN; |
153 | mode &= ~FSL_DMA_MR_EMP_EN; | ||
154 | } | ||
155 | } | 207 | } |
156 | 208 | ||
157 | if (chan->feature & FSL_DMA_CHAN_START_EXT) | 209 | if (chan->feature & FSL_DMA_CHAN_START_EXT) { |
158 | mode |= FSL_DMA_MR_EMS_EN; | 210 | mode |= FSL_DMA_MR_EMS_EN; |
159 | else | 211 | } else { |
212 | mode &= ~FSL_DMA_MR_EMS_EN; | ||
160 | mode |= FSL_DMA_MR_CS; | 213 | mode |= FSL_DMA_MR_CS; |
214 | } | ||
161 | 215 | ||
162 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 216 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
163 | } | 217 | } |
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan) | |||
167 | u32 mode; | 221 | u32 mode; |
168 | int i; | 222 | int i; |
169 | 223 | ||
224 | /* read the mode register */ | ||
170 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 225 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
171 | mode |= FSL_DMA_MR_CA; | ||
172 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
173 | 226 | ||
174 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); | 227 | /* |
228 | * The 85xx controller supports channel abort, which will stop | ||
229 | * the current transfer. On 83xx, this bit is the transfer error | ||
230 | * mask bit, which should not be changed. | ||
231 | */ | ||
232 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
233 | mode |= FSL_DMA_MR_CA; | ||
234 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
235 | |||
236 | mode &= ~FSL_DMA_MR_CA; | ||
237 | } | ||
238 | |||
239 | /* stop the DMA controller */ | ||
240 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); | ||
175 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 241 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
176 | 242 | ||
243 | /* wait for the DMA controller to become idle */ | ||
177 | for (i = 0; i < 100; i++) { | 244 | for (i = 0; i < 100; i++) { |
178 | if (dma_is_idle(chan)) | 245 | if (dma_is_idle(chan)) |
179 | return; | 246 | return; |
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan) | |||
182 | } | 249 | } |
183 | 250 | ||
184 | if (!dma_is_idle(chan)) | 251 | if (!dma_is_idle(chan)) |
185 | dev_err(chan->dev, "DMA halt timeout!\n"); | 252 | chan_err(chan, "DMA halt timeout!\n"); |
186 | } | ||
187 | |||
188 | static void set_ld_eol(struct fsldma_chan *chan, | ||
189 | struct fsl_desc_sw *desc) | ||
190 | { | ||
191 | u64 snoop_bits; | ||
192 | |||
193 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
194 | ? FSL_DMA_SNEN : 0; | ||
195 | |||
196 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, | ||
197 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | ||
198 | | snoop_bits, 64); | ||
199 | } | 253 | } |
200 | 254 | ||
201 | /** | 255 | /** |
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) | |||
321 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; | 375 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; |
322 | } | 376 | } |
323 | 377 | ||
324 | static void append_ld_queue(struct fsldma_chan *chan, | 378 | static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
325 | struct fsl_desc_sw *desc) | ||
326 | { | 379 | { |
327 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); | 380 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); |
328 | 381 | ||
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
363 | cookie = chan->common.cookie; | 416 | cookie = chan->common.cookie; |
364 | list_for_each_entry(child, &desc->tx_list, node) { | 417 | list_for_each_entry(child, &desc->tx_list, node) { |
365 | cookie++; | 418 | cookie++; |
366 | if (cookie < 0) | 419 | if (cookie < DMA_MIN_COOKIE) |
367 | cookie = 1; | 420 | cookie = DMA_MIN_COOKIE; |
368 | 421 | ||
369 | child->async_tx.cookie = cookie; | 422 | child->async_tx.cookie = cookie; |
370 | } | 423 | } |
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
385 | * | 438 | * |
386 | * Return - The descriptor allocated. NULL for failed. | 439 | * Return - The descriptor allocated. NULL for failed. |
387 | */ | 440 | */ |
388 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | 441 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) |
389 | struct fsldma_chan *chan) | ||
390 | { | 442 | { |
391 | struct fsl_desc_sw *desc; | 443 | struct fsl_desc_sw *desc; |
392 | dma_addr_t pdesc; | 444 | dma_addr_t pdesc; |
393 | 445 | ||
394 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 446 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
395 | if (!desc) { | 447 | if (!desc) { |
396 | dev_dbg(chan->dev, "out of memory for link desc\n"); | 448 | chan_dbg(chan, "out of memory for link descriptor\n"); |
397 | return NULL; | 449 | return NULL; |
398 | } | 450 | } |
399 | 451 | ||
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
403 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 455 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
404 | desc->async_tx.phys = pdesc; | 456 | desc->async_tx.phys = pdesc; |
405 | 457 | ||
458 | #ifdef FSL_DMA_LD_DEBUG | ||
459 | chan_dbg(chan, "LD %p allocated\n", desc); | ||
460 | #endif | ||
461 | |||
406 | return desc; | 462 | return desc; |
407 | } | 463 | } |
408 | 464 | ||
409 | |||
410 | /** | 465 | /** |
411 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | 466 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
412 | * @chan : Freescale DMA channel | 467 | * @chan : Freescale DMA channel |
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
427 | * We need the descriptor to be aligned to 32bytes | 482 | * We need the descriptor to be aligned to 32bytes |
428 | * for meeting FSL DMA specification requirement. | 483 | * for meeting FSL DMA specification requirement. |
429 | */ | 484 | */ |
430 | chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | 485 | chan->desc_pool = dma_pool_create(chan->name, chan->dev, |
431 | chan->dev, | ||
432 | sizeof(struct fsl_desc_sw), | 486 | sizeof(struct fsl_desc_sw), |
433 | __alignof__(struct fsl_desc_sw), 0); | 487 | __alignof__(struct fsl_desc_sw), 0); |
434 | if (!chan->desc_pool) { | 488 | if (!chan->desc_pool) { |
435 | dev_err(chan->dev, "unable to allocate channel %d " | 489 | chan_err(chan, "unable to allocate descriptor pool\n"); |
436 | "descriptor pool\n", chan->id); | ||
437 | return -ENOMEM; | 490 | return -ENOMEM; |
438 | } | 491 | } |
439 | 492 | ||
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, | |||
455 | 508 | ||
456 | list_for_each_entry_safe(desc, _desc, list, node) { | 509 | list_for_each_entry_safe(desc, _desc, list, node) { |
457 | list_del(&desc->node); | 510 | list_del(&desc->node); |
511 | #ifdef FSL_DMA_LD_DEBUG | ||
512 | chan_dbg(chan, "LD %p free\n", desc); | ||
513 | #endif | ||
458 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 514 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
459 | } | 515 | } |
460 | } | 516 | } |
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | |||
466 | 522 | ||
467 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { | 523 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { |
468 | list_del(&desc->node); | 524 | list_del(&desc->node); |
525 | #ifdef FSL_DMA_LD_DEBUG | ||
526 | chan_dbg(chan, "LD %p free\n", desc); | ||
527 | #endif | ||
469 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 528 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
470 | } | 529 | } |
471 | } | 530 | } |
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | |||
479 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 538 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
480 | unsigned long flags; | 539 | unsigned long flags; |
481 | 540 | ||
482 | dev_dbg(chan->dev, "Free all channel resources.\n"); | 541 | chan_dbg(chan, "free all channel resources\n"); |
483 | spin_lock_irqsave(&chan->desc_lock, flags); | 542 | spin_lock_irqsave(&chan->desc_lock, flags); |
484 | fsldma_free_desc_list(chan, &chan->ld_pending); | 543 | fsldma_free_desc_list(chan, &chan->ld_pending); |
485 | fsldma_free_desc_list(chan, &chan->ld_running); | 544 | fsldma_free_desc_list(chan, &chan->ld_running); |
@@ -502,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
502 | 561 | ||
503 | new = fsl_dma_alloc_descriptor(chan); | 562 | new = fsl_dma_alloc_descriptor(chan); |
504 | if (!new) { | 563 | if (!new) { |
505 | dev_err(chan->dev, msg_ld_oom); | 564 | chan_err(chan, "%s\n", msg_ld_oom); |
506 | return NULL; | 565 | return NULL; |
507 | } | 566 | } |
508 | 567 | ||
@@ -512,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
512 | /* Insert the link descriptor to the LD ring */ | 571 | /* Insert the link descriptor to the LD ring */ |
513 | list_add_tail(&new->node, &new->tx_list); | 572 | list_add_tail(&new->node, &new->tx_list); |
514 | 573 | ||
515 | /* Set End-of-link to the last link descriptor of new list*/ | 574 | /* Set End-of-link to the last link descriptor of new list */ |
516 | set_ld_eol(chan, new); | 575 | set_ld_eol(chan, new); |
517 | 576 | ||
518 | return &new->async_tx; | 577 | return &new->async_tx; |
519 | } | 578 | } |
520 | 579 | ||
521 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | 580 | static struct dma_async_tx_descriptor * |
522 | struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, | 581 | fsl_dma_prep_memcpy(struct dma_chan *dchan, |
582 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
523 | size_t len, unsigned long flags) | 583 | size_t len, unsigned long flags) |
524 | { | 584 | { |
525 | struct fsldma_chan *chan; | 585 | struct fsldma_chan *chan; |
@@ -539,12 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
539 | /* Allocate the link descriptor from DMA pool */ | 599 | /* Allocate the link descriptor from DMA pool */ |
540 | new = fsl_dma_alloc_descriptor(chan); | 600 | new = fsl_dma_alloc_descriptor(chan); |
541 | if (!new) { | 601 | if (!new) { |
542 | dev_err(chan->dev, msg_ld_oom); | 602 | chan_err(chan, "%s\n", msg_ld_oom); |
543 | goto fail; | 603 | goto fail; |
544 | } | 604 | } |
545 | #ifdef FSL_DMA_LD_DEBUG | ||
546 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | ||
547 | #endif | ||
548 | 605 | ||
549 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | 606 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); |
550 | 607 | ||
@@ -572,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
572 | new->async_tx.flags = flags; /* client is in control of this ack */ | 629 | new->async_tx.flags = flags; /* client is in control of this ack */ |
573 | new->async_tx.cookie = -EBUSY; | 630 | new->async_tx.cookie = -EBUSY; |
574 | 631 | ||
575 | /* Set End-of-link to the last link descriptor of new list*/ | 632 | /* Set End-of-link to the last link descriptor of new list */ |
576 | set_ld_eol(chan, new); | 633 | set_ld_eol(chan, new); |
577 | 634 | ||
578 | return &first->async_tx; | 635 | return &first->async_tx; |
@@ -627,12 +684,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, | |||
627 | /* allocate and populate the descriptor */ | 684 | /* allocate and populate the descriptor */ |
628 | new = fsl_dma_alloc_descriptor(chan); | 685 | new = fsl_dma_alloc_descriptor(chan); |
629 | if (!new) { | 686 | if (!new) { |
630 | dev_err(chan->dev, msg_ld_oom); | 687 | chan_err(chan, "%s\n", msg_ld_oom); |
631 | goto fail; | 688 | goto fail; |
632 | } | 689 | } |
633 | #ifdef FSL_DMA_LD_DEBUG | ||
634 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | ||
635 | #endif | ||
636 | 690 | ||
637 | set_desc_cnt(chan, &new->hw, len); | 691 | set_desc_cnt(chan, &new->hw, len); |
638 | set_desc_src(chan, &new->hw, src); | 692 | set_desc_src(chan, &new->hw, src); |
@@ -744,14 +798,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
744 | 798 | ||
745 | switch (cmd) { | 799 | switch (cmd) { |
746 | case DMA_TERMINATE_ALL: | 800 | case DMA_TERMINATE_ALL: |
801 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
802 | |||
747 | /* Halt the DMA engine */ | 803 | /* Halt the DMA engine */ |
748 | dma_halt(chan); | 804 | dma_halt(chan); |
749 | 805 | ||
750 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
751 | |||
752 | /* Remove and free all of the descriptors in the LD queue */ | 806 | /* Remove and free all of the descriptors in the LD queue */ |
753 | fsldma_free_desc_list(chan, &chan->ld_pending); | 807 | fsldma_free_desc_list(chan, &chan->ld_pending); |
754 | fsldma_free_desc_list(chan, &chan->ld_running); | 808 | fsldma_free_desc_list(chan, &chan->ld_running); |
809 | chan->idle = true; | ||
755 | 810 | ||
756 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 811 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
757 | return 0; | 812 | return 0; |
@@ -789,140 +844,87 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
789 | } | 844 | } |
790 | 845 | ||
791 | /** | 846 | /** |
792 | * fsl_dma_update_completed_cookie - Update the completed cookie. | 847 | * fsldma_cleanup_descriptor - cleanup and free a single link descriptor |
793 | * @chan : Freescale DMA channel | ||
794 | * | ||
795 | * CONTEXT: hardirq | ||
796 | */ | ||
797 | static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) | ||
798 | { | ||
799 | struct fsl_desc_sw *desc; | ||
800 | unsigned long flags; | ||
801 | dma_cookie_t cookie; | ||
802 | |||
803 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
804 | |||
805 | if (list_empty(&chan->ld_running)) { | ||
806 | dev_dbg(chan->dev, "no running descriptors\n"); | ||
807 | goto out_unlock; | ||
808 | } | ||
809 | |||
810 | /* Get the last descriptor, update the cookie to that */ | ||
811 | desc = to_fsl_desc(chan->ld_running.prev); | ||
812 | if (dma_is_idle(chan)) | ||
813 | cookie = desc->async_tx.cookie; | ||
814 | else { | ||
815 | cookie = desc->async_tx.cookie - 1; | ||
816 | if (unlikely(cookie < DMA_MIN_COOKIE)) | ||
817 | cookie = DMA_MAX_COOKIE; | ||
818 | } | ||
819 | |||
820 | chan->completed_cookie = cookie; | ||
821 | |||
822 | out_unlock: | ||
823 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
824 | } | ||
825 | |||
826 | /** | ||
827 | * fsldma_desc_status - Check the status of a descriptor | ||
828 | * @chan: Freescale DMA channel | 848 | * @chan: Freescale DMA channel |
829 | * @desc: DMA SW descriptor | 849 | * @desc: descriptor to cleanup and free |
830 | * | ||
831 | * This function will return the status of the given descriptor | ||
832 | */ | ||
833 | static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, | ||
834 | struct fsl_desc_sw *desc) | ||
835 | { | ||
836 | return dma_async_is_complete(desc->async_tx.cookie, | ||
837 | chan->completed_cookie, | ||
838 | chan->common.cookie); | ||
839 | } | ||
840 | |||
841 | /** | ||
842 | * fsl_chan_ld_cleanup - Clean up link descriptors | ||
843 | * @chan : Freescale DMA channel | ||
844 | * | 850 | * |
845 | * This function clean up the ld_queue of DMA channel. | 851 | * This function is used on a descriptor which has been executed by the DMA |
852 | * controller. It will run any callbacks, submit any dependencies, and then | ||
853 | * free the descriptor. | ||
846 | */ | 854 | */ |
847 | static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) | 855 | static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, |
856 | struct fsl_desc_sw *desc) | ||
848 | { | 857 | { |
849 | struct fsl_desc_sw *desc, *_desc; | 858 | struct dma_async_tx_descriptor *txd = &desc->async_tx; |
850 | unsigned long flags; | 859 | struct device *dev = chan->common.device->dev; |
851 | 860 | dma_addr_t src = get_desc_src(chan, desc); | |
852 | spin_lock_irqsave(&chan->desc_lock, flags); | 861 | dma_addr_t dst = get_desc_dst(chan, desc); |
853 | 862 | u32 len = get_desc_cnt(chan, desc); | |
854 | dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); | 863 | |
855 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { | 864 | /* Run the link descriptor callback function */ |
856 | dma_async_tx_callback callback; | 865 | if (txd->callback) { |
857 | void *callback_param; | 866 | #ifdef FSL_DMA_LD_DEBUG |
858 | 867 | chan_dbg(chan, "LD %p callback\n", desc); | |
859 | if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) | 868 | #endif |
860 | break; | 869 | txd->callback(txd->callback_param); |
870 | } | ||
861 | 871 | ||
862 | /* Remove from the list of running transactions */ | 872 | /* Run any dependencies */ |
863 | list_del(&desc->node); | 873 | dma_run_dependencies(txd); |
864 | 874 | ||
865 | /* Run the link descriptor callback function */ | 875 | /* Unmap the dst buffer, if requested */ |
866 | callback = desc->async_tx.callback; | 876 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
867 | callback_param = desc->async_tx.callback_param; | 877 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
868 | if (callback) { | 878 | dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); |
869 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 879 | else |
870 | dev_dbg(chan->dev, "LD %p callback\n", desc); | 880 | dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); |
871 | callback(callback_param); | 881 | } |
872 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
873 | } | ||
874 | 882 | ||
875 | /* Run any dependencies, then free the descriptor */ | 883 | /* Unmap the src buffer, if requested */ |
876 | dma_run_dependencies(&desc->async_tx); | 884 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
877 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 885 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
886 | dma_unmap_single(dev, src, len, DMA_TO_DEVICE); | ||
887 | else | ||
888 | dma_unmap_page(dev, src, len, DMA_TO_DEVICE); | ||
878 | } | 889 | } |
879 | 890 | ||
880 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 891 | #ifdef FSL_DMA_LD_DEBUG |
892 | chan_dbg(chan, "LD %p free\n", desc); | ||
893 | #endif | ||
894 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
881 | } | 895 | } |
882 | 896 | ||
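The callback that fsldma_cleanup_descriptor() runs is supplied by the dmaengine client when it prepares and submits a transaction; with this patch it is invoked from the channel tasklet once the hardware has finished the descriptor. A minimal, hypothetical client sketch (my_dev, my_done() and my_memcpy() are illustrative names, not part of this patch):

	#include <linux/dmaengine.h>
	#include <linux/completion.h>

	struct my_dev {
		struct dma_chan *chan;
		struct completion done;
	};

	/* invoked by the driver's cleanup path once the transfer is finished */
	static void my_done(void *param)
	{
		struct my_dev *md = param;

		complete(&md->done);
	}

	static int my_memcpy(struct my_dev *md, dma_addr_t dst, dma_addr_t src,
			     size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = md->chan->device->device_prep_dma_memcpy(md->chan, dst, src,
							      len, DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		tx->callback = my_done;
		tx->callback_param = md;

		init_completion(&md->done);
		cookie = tx->tx_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		/* nothing runs until the pending queue is kicked */
		dma_async_issue_pending(md->chan);
		wait_for_completion(&md->done);
		return 0;
	}

Because the callback now runs in tasklet context it must not sleep; the completion pattern above keeps any sleeping in the submitting thread.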
883 | /** | 897 | /** |
884 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | 898 | * fsl_chan_xfer_ld_queue - transfer any pending transactions |
885 | * @chan : Freescale DMA channel | 899 | * @chan : Freescale DMA channel |
886 | * | 900 | * |
887 | * This will make sure that any pending transactions will be run. | 901 | * HARDWARE STATE: idle |
888 | * If the DMA controller is idle, it will be started. Otherwise, | 902 | * LOCKING: must hold chan->desc_lock |
889 | * the DMA controller's interrupt handler will start any pending | ||
890 | * transactions when it becomes idle. | ||
891 | */ | 903 | */ |
892 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | 904 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) |
893 | { | 905 | { |
894 | struct fsl_desc_sw *desc; | 906 | struct fsl_desc_sw *desc; |
895 | unsigned long flags; | ||
896 | |||
897 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
898 | 907 | ||
899 | /* | 908 | /* |
900 | * If the list of pending descriptors is empty, then we | 909 | * If the list of pending descriptors is empty, then we |
901 | * don't need to do any work at all | 910 | * don't need to do any work at all |
902 | */ | 911 | */ |
903 | if (list_empty(&chan->ld_pending)) { | 912 | if (list_empty(&chan->ld_pending)) { |
904 | dev_dbg(chan->dev, "no pending LDs\n"); | 913 | chan_dbg(chan, "no pending LDs\n"); |
905 | goto out_unlock; | 914 | return; |
906 | } | 915 | } |
907 | 916 | ||
908 | /* | 917 | /* |
909 | * The DMA controller is not idle, which means the interrupt | 918 | * The DMA controller is not idle, which means that the interrupt |
910 | * handler will start any queued transactions when it runs | 919 | * handler will start any queued transactions when it runs after |
911 | * at the end of the current transaction | 920 | * this transaction finishes |
912 | */ | 921 | */ |
913 | if (!dma_is_idle(chan)) { | 922 | if (!chan->idle) { |
914 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 923 | chan_dbg(chan, "DMA controller still busy\n"); |
915 | goto out_unlock; | 924 | return; |
916 | } | 925 | } |
917 | 926 | ||
918 | /* | 927 | /* |
919 | * TODO: | ||
920 | * make sure the dma_halt() function really un-wedges the | ||
921 | * controller as much as possible | ||
922 | */ | ||
923 | dma_halt(chan); | ||
924 | |||
925 | /* | ||
926 | * If there are some link descriptors which have not been | 928 | * If there are some link descriptors which have not been |
927 | * transferred, we need to start the controller | 929 | * transferred, we need to start the controller |
928 | */ | 930 | */ |
@@ -931,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | |||
931 | * Move all elements from the queue of pending transactions | 933 | * Move all elements from the queue of pending transactions |
932 | * onto the list of running transactions | 934 | * onto the list of running transactions |
933 | */ | 935 | */ |
936 | chan_dbg(chan, "idle, starting controller\n"); | ||
934 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); | 937 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); |
935 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); | 938 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); |
936 | 939 | ||
937 | /* | 940 | /* |
941 | * The 85xx DMA controller doesn't clear the channel start bit | ||
942 | * automatically at the end of a transfer. Therefore we must clear | ||
943 | * it in software before starting the transfer. | ||
944 | */ | ||
945 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
946 | u32 mode; | ||
947 | |||
948 | mode = DMA_IN(chan, &chan->regs->mr, 32); | ||
949 | mode &= ~FSL_DMA_MR_CS; | ||
950 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
951 | } | ||
952 | |||
953 | /* | ||
938 | * Program the descriptor's address into the DMA controller, | 954 | * Program the descriptor's address into the DMA controller, |
939 | * then start the DMA transaction | 955 | * then start the DMA transaction |
940 | */ | 956 | */ |
941 | set_cdar(chan, desc->async_tx.phys); | 957 | set_cdar(chan, desc->async_tx.phys); |
942 | dma_start(chan); | 958 | get_cdar(chan); |
943 | 959 | ||
944 | out_unlock: | 960 | dma_start(chan); |
945 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 961 | chan->idle = false; |
946 | } | 962 | } |
947 | 963 | ||
948 | /** | 964 | /** |
@@ -952,7 +968,11 @@ out_unlock: | |||
952 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) | 968 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) |
953 | { | 969 | { |
954 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 970 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
971 | unsigned long flags; | ||
972 | |||
973 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
955 | fsl_chan_xfer_ld_queue(chan); | 974 | fsl_chan_xfer_ld_queue(chan); |
975 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
956 | } | 976 | } |
957 | 977 | ||
958 | /** | 978 | /** |
@@ -964,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
964 | struct dma_tx_state *txstate) | 984 | struct dma_tx_state *txstate) |
965 | { | 985 | { |
966 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 986 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
967 | dma_cookie_t last_used; | ||
968 | dma_cookie_t last_complete; | 987 | dma_cookie_t last_complete; |
988 | dma_cookie_t last_used; | ||
989 | unsigned long flags; | ||
969 | 990 | ||
970 | fsl_chan_ld_cleanup(chan); | 991 | spin_lock_irqsave(&chan->desc_lock, flags); |
971 | 992 | ||
972 | last_used = dchan->cookie; | ||
973 | last_complete = chan->completed_cookie; | 993 | last_complete = chan->completed_cookie; |
994 | last_used = dchan->cookie; | ||
974 | 995 | ||
975 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 996 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
976 | 997 | ||
998 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
977 | return dma_async_is_complete(cookie, last_complete, last_used); | 999 | return dma_async_is_complete(cookie, last_complete, last_used); |
978 | } | 1000 | } |
979 | 1001 | ||
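fsl_tx_status() is what backs the generic dmaengine completion helpers. A client that polls instead of using callbacks could do roughly the following (a hedged sketch; the busy-wait is only for illustration):

	#include <linux/dmaengine.h>

	/* poll a previously submitted cookie until the channel reports it done */
	static int my_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
	{
		enum dma_status status;

		dma_async_issue_pending(chan);

		do {
			status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
			if (status == DMA_ERROR)
				return -EIO;
			cpu_relax();
		} while (status == DMA_IN_PROGRESS);

		return 0;
	}

With the change above, fsl_tx_status() only snapshots the cookies under desc_lock instead of running descriptor cleanup itself; all cleanup now happens in the tasklet.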
@@ -984,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
984 | static irqreturn_t fsldma_chan_irq(int irq, void *data) | 1006 | static irqreturn_t fsldma_chan_irq(int irq, void *data) |
985 | { | 1007 | { |
986 | struct fsldma_chan *chan = data; | 1008 | struct fsldma_chan *chan = data; |
987 | int update_cookie = 0; | ||
988 | int xfer_ld_q = 0; | ||
989 | u32 stat; | 1009 | u32 stat; |
990 | 1010 | ||
991 | /* save and clear the status register */ | 1011 | /* save and clear the status register */ |
992 | stat = get_sr(chan); | 1012 | stat = get_sr(chan); |
993 | set_sr(chan, stat); | 1013 | set_sr(chan, stat); |
994 | dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); | 1014 | chan_dbg(chan, "irq: stat = 0x%x\n", stat); |
995 | 1015 | ||
1016 | /* check that this was really our device */ | ||
996 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | 1017 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
997 | if (!stat) | 1018 | if (!stat) |
998 | return IRQ_NONE; | 1019 | return IRQ_NONE; |
999 | 1020 | ||
1000 | if (stat & FSL_DMA_SR_TE) | 1021 | if (stat & FSL_DMA_SR_TE) |
1001 | dev_err(chan->dev, "Transfer Error!\n"); | 1022 | chan_err(chan, "Transfer Error!\n"); |
1002 | 1023 | ||
1003 | /* | 1024 | /* |
1004 | * Programming Error | 1025 | * Programming Error |
@@ -1006,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1006 | * trigger a PE interrupt. | 1027 | * trigger a PE interrupt. |
1007 | */ | 1028 | */ |
1008 | if (stat & FSL_DMA_SR_PE) { | 1029 | if (stat & FSL_DMA_SR_PE) { |
1009 | dev_dbg(chan->dev, "irq: Programming Error INT\n"); | 1030 | chan_dbg(chan, "irq: Programming Error INT\n"); |
1010 | if (get_bcr(chan) == 0) { | ||
1011 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | ||
1012 | * Now, update the completed cookie, and continue the | ||
1013 | * next uncompleted transfer. | ||
1014 | */ | ||
1015 | update_cookie = 1; | ||
1016 | xfer_ld_q = 1; | ||
1017 | } | ||
1018 | stat &= ~FSL_DMA_SR_PE; | 1031 | stat &= ~FSL_DMA_SR_PE; |
1019 | } | 1032 | if (get_bcr(chan) != 0) |
1020 | 1033 | chan_err(chan, "Programming Error!\n"); | |
1021 | /* | ||
1022 | * If the link descriptor segment transfer finishes, | ||
1023 | * we will recycle the used descriptor. | ||
1024 | */ | ||
1025 | if (stat & FSL_DMA_SR_EOSI) { | ||
1026 | dev_dbg(chan->dev, "irq: End-of-segments INT\n"); | ||
1027 | dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", | ||
1028 | (unsigned long long)get_cdar(chan), | ||
1029 | (unsigned long long)get_ndar(chan)); | ||
1030 | stat &= ~FSL_DMA_SR_EOSI; | ||
1031 | update_cookie = 1; | ||
1032 | } | 1034 | } |
1033 | 1035 | ||
1034 | /* | 1036 | /* |
@@ -1036,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1036 | * and start the next transfer if it exists. | 1038 | * and start the next transfer if it exists. |
1037 | */ | 1039 | */ |
1038 | if (stat & FSL_DMA_SR_EOCDI) { | 1040 | if (stat & FSL_DMA_SR_EOCDI) { |
1039 | dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); | 1041 | chan_dbg(chan, "irq: End-of-Chain link INT\n"); |
1040 | stat &= ~FSL_DMA_SR_EOCDI; | 1042 | stat &= ~FSL_DMA_SR_EOCDI; |
1041 | update_cookie = 1; | ||
1042 | xfer_ld_q = 1; | ||
1043 | } | 1043 | } |
1044 | 1044 | ||
1045 | /* | 1045 | /* |
@@ -1048,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1048 | * prepare next transfer. | 1048 | * prepare next transfer. |
1049 | */ | 1049 | */ |
1050 | if (stat & FSL_DMA_SR_EOLNI) { | 1050 | if (stat & FSL_DMA_SR_EOLNI) { |
1051 | dev_dbg(chan->dev, "irq: End-of-link INT\n"); | 1051 | chan_dbg(chan, "irq: End-of-link INT\n"); |
1052 | stat &= ~FSL_DMA_SR_EOLNI; | 1052 | stat &= ~FSL_DMA_SR_EOLNI; |
1053 | xfer_ld_q = 1; | ||
1054 | } | 1053 | } |
1055 | 1054 | ||
1056 | if (update_cookie) | 1055 | /* check that the DMA controller is really idle */ |
1057 | fsl_dma_update_completed_cookie(chan); | 1056 | if (!dma_is_idle(chan)) |
1058 | if (xfer_ld_q) | 1057 | chan_err(chan, "irq: controller not idle!\n"); |
1059 | fsl_chan_xfer_ld_queue(chan); | 1058 | |
1059 | /* check that we handled all of the bits */ | ||
1060 | if (stat) | 1060 | if (stat) |
1061 | dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); | 1061 | chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); |
1062 | 1062 | ||
1063 | dev_dbg(chan->dev, "irq: Exit\n"); | 1063 | /* |
1064 | * Schedule the tasklet to handle all cleanup of the current | ||
1065 | * transaction. It will start a new transaction if there is | ||
1066 | * one pending. | ||
1067 | */ | ||
1064 | tasklet_schedule(&chan->tasklet); | 1068 | tasklet_schedule(&chan->tasklet); |
1069 | chan_dbg(chan, "irq: Exit\n"); | ||
1065 | return IRQ_HANDLED; | 1070 | return IRQ_HANDLED; |
1066 | } | 1071 | } |
1067 | 1072 | ||
1068 | static void dma_do_tasklet(unsigned long data) | 1073 | static void dma_do_tasklet(unsigned long data) |
1069 | { | 1074 | { |
1070 | struct fsldma_chan *chan = (struct fsldma_chan *)data; | 1075 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
1071 | fsl_chan_ld_cleanup(chan); | 1076 | struct fsl_desc_sw *desc, *_desc; |
1077 | LIST_HEAD(ld_cleanup); | ||
1078 | unsigned long flags; | ||
1079 | |||
1080 | chan_dbg(chan, "tasklet entry\n"); | ||
1081 | |||
1082 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
1083 | |||
1084 | /* update the cookie if we have some descriptors to cleanup */ | ||
1085 | if (!list_empty(&chan->ld_running)) { | ||
1086 | dma_cookie_t cookie; | ||
1087 | |||
1088 | desc = to_fsl_desc(chan->ld_running.prev); | ||
1089 | cookie = desc->async_tx.cookie; | ||
1090 | |||
1091 | chan->completed_cookie = cookie; | ||
1092 | chan_dbg(chan, "completed_cookie=%d\n", cookie); | ||
1093 | } | ||
1094 | |||
1095 | /* | ||
1096 | * move the descriptors to a temporary list so we can drop the lock | ||
1097 | * during the entire cleanup operation | ||
1098 | */ | ||
1099 | list_splice_tail_init(&chan->ld_running, &ld_cleanup); | ||
1100 | |||
1101 | /* the hardware is now idle and ready for more */ | ||
1102 | chan->idle = true; | ||
1103 | |||
1104 | /* | ||
1105 | * Start any pending transactions automatically | ||
1106 | * | ||
1107 | * In the ideal case, we keep the DMA controller busy while we go | ||
1108 | * ahead and free the descriptors below. | ||
1109 | */ | ||
1110 | fsl_chan_xfer_ld_queue(chan); | ||
1111 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
1112 | |||
1113 | /* Run the callback for each descriptor, in order */ | ||
1114 | list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { | ||
1115 | |||
1116 | /* Remove from the list of transactions */ | ||
1117 | list_del(&desc->node); | ||
1118 | |||
1119 | /* Run all cleanup for this descriptor */ | ||
1120 | fsldma_cleanup_descriptor(chan, desc); | ||
1121 | } | ||
1122 | |||
1123 | chan_dbg(chan, "tasklet exit\n"); | ||
1072 | } | 1124 | } |
1073 | 1125 | ||
1074 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) | 1126 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) |
@@ -1116,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev) | |||
1116 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1168 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1117 | chan = fdev->chan[i]; | 1169 | chan = fdev->chan[i]; |
1118 | if (chan && chan->irq != NO_IRQ) { | 1170 | if (chan && chan->irq != NO_IRQ) { |
1119 | dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); | 1171 | chan_dbg(chan, "free per-channel IRQ\n"); |
1120 | free_irq(chan->irq, chan); | 1172 | free_irq(chan->irq, chan); |
1121 | } | 1173 | } |
1122 | } | 1174 | } |
@@ -1143,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev) | |||
1143 | continue; | 1195 | continue; |
1144 | 1196 | ||
1145 | if (chan->irq == NO_IRQ) { | 1197 | if (chan->irq == NO_IRQ) { |
1146 | dev_err(fdev->dev, "no interrupts property defined for " | 1198 | chan_err(chan, "interrupts property missing in device tree\n"); |
1147 | "DMA channel %d. Please fix your " | ||
1148 | "device tree\n", chan->id); | ||
1149 | ret = -ENODEV; | 1199 | ret = -ENODEV; |
1150 | goto out_unwind; | 1200 | goto out_unwind; |
1151 | } | 1201 | } |
1152 | 1202 | ||
1153 | dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); | 1203 | chan_dbg(chan, "request per-channel IRQ\n"); |
1154 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, | 1204 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, |
1155 | "fsldma-chan", chan); | 1205 | "fsldma-chan", chan); |
1156 | if (ret) { | 1206 | if (ret) { |
1157 | dev_err(fdev->dev, "unable to request IRQ for DMA " | 1207 | chan_err(chan, "unable to request per-channel IRQ\n"); |
1158 | "channel %d\n", chan->id); | ||
1159 | goto out_unwind; | 1208 | goto out_unwind; |
1160 | } | 1209 | } |
1161 | } | 1210 | } |
@@ -1230,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1230 | 1279 | ||
1231 | fdev->chan[chan->id] = chan; | 1280 | fdev->chan[chan->id] = chan; |
1232 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | 1281 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
1282 | snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); | ||
1233 | 1283 | ||
1234 | /* Initialize the channel */ | 1284 | /* Initialize the channel */ |
1235 | dma_init(chan); | 1285 | dma_init(chan); |
@@ -1250,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1250 | spin_lock_init(&chan->desc_lock); | 1300 | spin_lock_init(&chan->desc_lock); |
1251 | INIT_LIST_HEAD(&chan->ld_pending); | 1301 | INIT_LIST_HEAD(&chan->ld_pending); |
1252 | INIT_LIST_HEAD(&chan->ld_running); | 1302 | INIT_LIST_HEAD(&chan->ld_running); |
1303 | chan->idle = true; | ||
1253 | 1304 | ||
1254 | chan->common.device = &fdev->common; | 1305 | chan->common.device = &fdev->common; |
1255 | 1306 | ||
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index ba9f403c0fbe..9cb5aa57c677 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -102,8 +102,8 @@ struct fsl_desc_sw { | |||
102 | } __attribute__((aligned(32))); | 102 | } __attribute__((aligned(32))); |
103 | 103 | ||
104 | struct fsldma_chan_regs { | 104 | struct fsldma_chan_regs { |
105 | u32 mr; /* 0x00 - Mode Register */ | 105 | u32 mr; /* 0x00 - Mode Register */ |
106 | u32 sr; /* 0x04 - Status Register */ | 106 | u32 sr; /* 0x04 - Status Register */ |
107 | u64 cdar; /* 0x08 - Current descriptor address register */ | 107 | u64 cdar; /* 0x08 - Current descriptor address register */ |
108 | u64 sar; /* 0x10 - Source Address Register */ | 108 | u64 sar; /* 0x10 - Source Address Register */ |
109 | u64 dar; /* 0x18 - Destination Address Register */ | 109 | u64 dar; /* 0x18 - Destination Address Register */ |
@@ -135,6 +135,7 @@ struct fsldma_device { | |||
135 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | 135 | #define FSL_DMA_CHAN_START_EXT 0x00002000 |
136 | 136 | ||
137 | struct fsldma_chan { | 137 | struct fsldma_chan { |
138 | char name[8]; /* Channel name */ | ||
138 | struct fsldma_chan_regs __iomem *regs; | 139 | struct fsldma_chan_regs __iomem *regs; |
139 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | 140 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ |
140 | spinlock_t desc_lock; /* Descriptor operation lock */ | 141 | spinlock_t desc_lock; /* Descriptor operation lock */ |
@@ -147,6 +148,7 @@ struct fsldma_chan { | |||
147 | int id; /* Raw id of this channel */ | 148 | int id; /* Raw id of this channel */ |
148 | struct tasklet_struct tasklet; | 149 | struct tasklet_struct tasklet; |
149 | u32 feature; | 150 | u32 feature; |
151 | bool idle; /* DMA controller is idle */ | ||
150 | 152 | ||
151 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); | 153 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); |
152 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); | 154 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c new file mode 100644 index 000000000000..88aad4f54002 --- /dev/null +++ b/drivers/dma/mxs-dma.c | |||
@@ -0,0 +1,724 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * Refer to drivers/dma/imx-sdma.c | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/clk.h> | ||
16 | #include <linux/wait.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/semaphore.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/dmaengine.h> | ||
24 | #include <linux/delay.h> | ||
25 | |||
26 | #include <asm/irq.h> | ||
27 | #include <mach/mxs.h> | ||
28 | #include <mach/dma.h> | ||
29 | #include <mach/common.h> | ||
30 | |||
31 | /* | ||
32 | * NOTE: The term "PIO" throughout the mxs-dma implementation means | ||
33 | * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, | ||
34 | * dma can program the controller registers of peripheral devices. | ||
35 | */ | ||
36 | |||
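To make the PIO note above concrete: a client passes an array of register values where a scatterlist would normally go and uses DMA_NONE as the direction, which is handled by mxs_dma_prep_slave_sg() below. A rough, hypothetical sketch (my_prep_pio() is illustrative and not part of this driver):

	#include <linux/dmaengine.h>

	/*
	 * Illustrative only: hand a block of PIO words to the channel so the
	 * DMA controller writes them into the peripheral's registers.
	 */
	static struct dma_async_tx_descriptor *my_prep_pio(struct dma_chan *chan,
							   u32 *pio_words,
							   unsigned int num_words)
	{
		/* sgl carries the PIO words and sg_len is the word count */
		return chan->device->device_prep_slave_sg(chan,
				(struct scatterlist *)pio_words,
				num_words, DMA_NONE, 0);
	}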
37 | #define MXS_DMA_APBH 0 | ||
38 | #define MXS_DMA_APBX 1 | ||
39 | #define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) | ||
40 | |||
41 | #define APBH_VERSION_LATEST 3 | ||
42 | #define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) | ||
43 | |||
44 | #define HW_APBHX_CTRL0 0x000 | ||
45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) | ||
46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) | ||
47 | #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 | ||
48 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 | ||
49 | #define HW_APBHX_CTRL1 0x010 | ||
50 | #define HW_APBHX_CTRL2 0x020 | ||
51 | #define HW_APBHX_CHANNEL_CTRL 0x030 | ||
52 | #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 | ||
53 | #define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) | ||
54 | #define HW_APBX_VERSION 0x800 | ||
55 | #define BP_APBHX_VERSION_MAJOR 24 | ||
56 | #define HW_APBHX_CHn_NXTCMDAR(n) \ | ||
57 | (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) | ||
58 | #define HW_APBHX_CHn_SEMA(n) \ | ||
59 | (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70) | ||
60 | |||
61 | /* | ||
62 | * ccw bits definitions | ||
63 | * | ||
64 | * COMMAND: 0..1 (2) | ||
65 | * CHAIN: 2 (1) | ||
66 | * IRQ: 3 (1) | ||
67 | * NAND_LOCK: 4 (1) - not implemented | ||
68 | * NAND_WAIT4READY: 5 (1) - not implemented | ||
69 | * DEC_SEM: 6 (1) | ||
70 | * WAIT4END: 7 (1) | ||
71 | * HALT_ON_TERMINATE: 8 (1) | ||
72 | * TERMINATE_FLUSH: 9 (1) | ||
73 | * RESERVED: 10..11 (2) | ||
74 | * PIO_NUM: 12..15 (4) | ||
75 | */ | ||
76 | #define BP_CCW_COMMAND 0 | ||
77 | #define BM_CCW_COMMAND (3 << 0) | ||
78 | #define CCW_CHAIN (1 << 2) | ||
79 | #define CCW_IRQ (1 << 3) | ||
80 | #define CCW_DEC_SEM (1 << 6) | ||
81 | #define CCW_WAIT4END (1 << 7) | ||
82 | #define CCW_HALT_ON_TERM (1 << 8) | ||
83 | #define CCW_TERM_FLUSH (1 << 9) | ||
84 | #define BP_CCW_PIO_NUM 12 | ||
85 | #define BM_CCW_PIO_NUM (0xf << 12) | ||
86 | |||
87 | #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) | ||
88 | |||
89 | #define MXS_DMA_CMD_NO_XFER 0 | ||
90 | #define MXS_DMA_CMD_WRITE 1 | ||
91 | #define MXS_DMA_CMD_READ 2 | ||
92 | #define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ | ||
93 | |||
94 | struct mxs_dma_ccw { | ||
95 | u32 next; | ||
96 | u16 bits; | ||
97 | u16 xfer_bytes; | ||
98 | #define MAX_XFER_BYTES 0xff00 | ||
99 | u32 bufaddr; | ||
100 | #define MXS_PIO_WORDS 16 | ||
101 | u32 pio_words[MXS_PIO_WORDS]; | ||
102 | }; | ||
103 | |||
104 | #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) | ||
105 | |||
106 | struct mxs_dma_chan { | ||
107 | struct mxs_dma_engine *mxs_dma; | ||
108 | struct dma_chan chan; | ||
109 | struct dma_async_tx_descriptor desc; | ||
110 | struct tasklet_struct tasklet; | ||
111 | int chan_irq; | ||
112 | struct mxs_dma_ccw *ccw; | ||
113 | dma_addr_t ccw_phys; | ||
114 | dma_cookie_t last_completed; | ||
115 | enum dma_status status; | ||
116 | unsigned int flags; | ||
117 | #define MXS_DMA_SG_LOOP (1 << 0) | ||
118 | }; | ||
119 | |||
120 | #define MXS_DMA_CHANNELS 16 | ||
121 | #define MXS_DMA_CHANNELS_MASK 0xffff | ||
122 | |||
123 | struct mxs_dma_engine { | ||
124 | int dev_id; | ||
125 | unsigned int version; | ||
126 | void __iomem *base; | ||
127 | struct clk *clk; | ||
128 | struct dma_device dma_device; | ||
129 | struct device_dma_parameters dma_parms; | ||
130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; | ||
131 | }; | ||
132 | |||
133 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | ||
134 | { | ||
135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
136 | int chan_id = mxs_chan->chan.chan_id; | ||
137 | |||
138 | if (dma_is_apbh() && apbh_is_old()) | ||
139 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), | ||
140 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
141 | else | ||
142 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), | ||
143 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); | ||
144 | } | ||
145 | |||
146 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | ||
147 | { | ||
148 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
149 | int chan_id = mxs_chan->chan.chan_id; | ||
150 | |||
151 | /* set cmd_addr up */ | ||
152 | writel(mxs_chan->ccw_phys, | ||
153 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); | ||
154 | |||
155 | /* enable apbh channel clock */ | ||
156 | if (dma_is_apbh()) { | ||
157 | if (apbh_is_old()) | ||
158 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
159 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
160 | else | ||
161 | writel(1 << chan_id, | ||
162 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
163 | } | ||
164 | |||
165 | /* write 1 to SEMA to kick off the channel */ | ||
166 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); | ||
167 | } | ||
168 | |||
169 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | ||
170 | { | ||
171 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
172 | int chan_id = mxs_chan->chan.chan_id; | ||
173 | |||
174 | /* disable apbh channel clock */ | ||
175 | if (dma_is_apbh()) { | ||
176 | if (apbh_is_old()) | ||
177 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
178 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
179 | else | ||
180 | writel(1 << chan_id, | ||
181 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
182 | } | ||
183 | |||
184 | mxs_chan->status = DMA_SUCCESS; | ||
185 | } | ||
186 | |||
187 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | ||
188 | { | ||
189 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
190 | int chan_id = mxs_chan->chan.chan_id; | ||
191 | |||
192 | /* freeze the channel */ | ||
193 | if (dma_is_apbh() && apbh_is_old()) | ||
194 | writel(1 << chan_id, | ||
195 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
196 | else | ||
197 | writel(1 << chan_id, | ||
198 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); | ||
199 | |||
200 | mxs_chan->status = DMA_PAUSED; | ||
201 | } | ||
202 | |||
203 | static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | ||
204 | { | ||
205 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
206 | int chan_id = mxs_chan->chan.chan_id; | ||
207 | |||
208 | /* unfreeze the channel */ | ||
209 | if (dma_is_apbh() && apbh_is_old()) | ||
210 | writel(1 << chan_id, | ||
211 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
212 | else | ||
213 | writel(1 << chan_id, | ||
214 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); | ||
215 | |||
216 | mxs_chan->status = DMA_IN_PROGRESS; | ||
217 | } | ||
218 | |||
219 | static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) | ||
220 | { | ||
221 | dma_cookie_t cookie = mxs_chan->chan.cookie; | ||
222 | |||
223 | if (++cookie < 0) | ||
224 | cookie = 1; | ||
225 | |||
226 | mxs_chan->chan.cookie = cookie; | ||
227 | mxs_chan->desc.cookie = cookie; | ||
228 | |||
229 | return cookie; | ||
230 | } | ||
231 | |||
232 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | ||
233 | { | ||
234 | return container_of(chan, struct mxs_dma_chan, chan); | ||
235 | } | ||
236 | |||
237 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
238 | { | ||
239 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); | ||
240 | |||
241 | mxs_dma_enable_chan(mxs_chan); | ||
242 | |||
243 | return mxs_dma_assign_cookie(mxs_chan); | ||
244 | } | ||
245 | |||
246 | static void mxs_dma_tasklet(unsigned long data) | ||
247 | { | ||
248 | struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; | ||
249 | |||
250 | if (mxs_chan->desc.callback) | ||
251 | mxs_chan->desc.callback(mxs_chan->desc.callback_param); | ||
252 | } | ||
253 | |||
254 | static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | ||
255 | { | ||
256 | struct mxs_dma_engine *mxs_dma = dev_id; | ||
257 | u32 stat1, stat2; | ||
258 | |||
259 | /* completion status */ | ||
260 | stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); | ||
261 | stat1 &= MXS_DMA_CHANNELS_MASK; | ||
262 | writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); | ||
263 | |||
264 | /* error status */ | ||
265 | stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); | ||
266 | writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); | ||
267 | |||
268 | /* | ||
269 | * When both the completion and error-of-termination bits are set at | ||
270 | * the same time, we do not take it as an error. IOW, it only becomes | ||
271 | * an error we need to handle here if it is either (1) a bus error | ||
272 | * or (2) a termination error with no completion. | ||
273 | */ | ||
274 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | ||
275 | (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ | ||
276 | |||
277 | /* combine error and completion status for checking */ | ||
278 | stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; | ||
279 | while (stat1) { | ||
280 | int channel = fls(stat1) - 1; | ||
281 | struct mxs_dma_chan *mxs_chan = | ||
282 | &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; | ||
283 | |||
284 | if (channel >= MXS_DMA_CHANNELS) { | ||
285 | dev_dbg(mxs_dma->dma_device.dev, | ||
286 | "%s: error in channel %d\n", __func__, | ||
287 | channel - MXS_DMA_CHANNELS); | ||
288 | mxs_chan->status = DMA_ERROR; | ||
289 | mxs_dma_reset_chan(mxs_chan); | ||
290 | } else { | ||
291 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) | ||
292 | mxs_chan->status = DMA_IN_PROGRESS; | ||
293 | else | ||
294 | mxs_chan->status = DMA_SUCCESS; | ||
295 | } | ||
296 | |||
297 | stat1 &= ~(1 << channel); | ||
298 | |||
299 | if (mxs_chan->status == DMA_SUCCESS) | ||
300 | mxs_chan->last_completed = mxs_chan->desc.cookie; | ||
301 | |||
302 | /* schedule tasklet on this channel */ | ||
303 | tasklet_schedule(&mxs_chan->tasklet); | ||
304 | } | ||
305 | |||
306 | return IRQ_HANDLED; | ||
307 | } | ||
308 | |||
309 | static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | ||
310 | { | ||
311 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
312 | struct mxs_dma_data *data = chan->private; | ||
313 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
314 | int ret; | ||
315 | |||
316 | if (!data) | ||
317 | return -EINVAL; | ||
318 | |||
319 | mxs_chan->chan_irq = data->chan_irq; | ||
320 | |||
321 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | ||
322 | &mxs_chan->ccw_phys, GFP_KERNEL); | ||
323 | if (!mxs_chan->ccw) { | ||
324 | ret = -ENOMEM; | ||
325 | goto err_alloc; | ||
326 | } | ||
327 | |||
328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | ||
329 | |||
330 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | ||
331 | 0, "mxs-dma", mxs_dma); | ||
332 | if (ret) | ||
333 | goto err_irq; | ||
334 | |||
335 | ret = clk_enable(mxs_dma->clk); | ||
336 | if (ret) | ||
337 | goto err_clk; | ||
338 | |||
339 | mxs_dma_reset_chan(mxs_chan); | ||
340 | |||
341 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); | ||
342 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; | ||
343 | |||
344 | /* the descriptor is ready */ | ||
345 | async_tx_ack(&mxs_chan->desc); | ||
346 | |||
347 | return 0; | ||
348 | |||
349 | err_clk: | ||
350 | free_irq(mxs_chan->chan_irq, mxs_dma); | ||
351 | err_irq: | ||
352 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | ||
353 | mxs_chan->ccw, mxs_chan->ccw_phys); | ||
354 | err_alloc: | ||
355 | return ret; | ||
356 | } | ||
357 | |||
358 | static void mxs_dma_free_chan_resources(struct dma_chan *chan) | ||
359 | { | ||
360 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
361 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
362 | |||
363 | mxs_dma_disable_chan(mxs_chan); | ||
364 | |||
365 | free_irq(mxs_chan->chan_irq, mxs_dma); | ||
366 | |||
367 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | ||
368 | mxs_chan->ccw, mxs_chan->ccw_phys); | ||
369 | |||
370 | clk_disable(mxs_dma->clk); | ||
371 | } | ||
372 | |||
373 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | ||
374 | struct dma_chan *chan, struct scatterlist *sgl, | ||
375 | unsigned int sg_len, enum dma_data_direction direction, | ||
376 | unsigned long append) | ||
377 | { | ||
378 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
379 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
380 | struct mxs_dma_ccw *ccw; | ||
381 | struct scatterlist *sg; | ||
382 | int i, j; | ||
383 | u32 *pio; | ||
384 | static int idx; | ||
385 | |||
386 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) | ||
387 | return NULL; | ||
388 | |||
389 | if (sg_len + (append ? idx : 0) > NUM_CCW) { | ||
390 | dev_err(mxs_dma->dma_device.dev, | ||
391 | "maximum number of sg exceeded: %d > %d\n", | ||
392 | sg_len, NUM_CCW); | ||
393 | goto err_out; | ||
394 | } | ||
395 | |||
396 | mxs_chan->status = DMA_IN_PROGRESS; | ||
397 | mxs_chan->flags = 0; | ||
398 | |||
399 | /* | ||
400 | * If the sg is prepared with the append flag set, it will be | ||
401 | * appended to the last prepared sg. | ||
402 | */ | ||
403 | if (append) { | ||
404 | BUG_ON(idx < 1); | ||
405 | ccw = &mxs_chan->ccw[idx - 1]; | ||
406 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; | ||
407 | ccw->bits |= CCW_CHAIN; | ||
408 | ccw->bits &= ~CCW_IRQ; | ||
409 | ccw->bits &= ~CCW_DEC_SEM; | ||
410 | ccw->bits &= ~CCW_WAIT4END; | ||
411 | } else { | ||
412 | idx = 0; | ||
413 | } | ||
414 | |||
415 | if (direction == DMA_NONE) { | ||
416 | ccw = &mxs_chan->ccw[idx++]; | ||
417 | pio = (u32 *) sgl; | ||
418 | |||
419 | for (j = 0; j < sg_len;) | ||
420 | ccw->pio_words[j++] = *pio++; | ||
421 | |||
422 | ccw->bits = 0; | ||
423 | ccw->bits |= CCW_IRQ; | ||
424 | ccw->bits |= CCW_DEC_SEM; | ||
425 | ccw->bits |= CCW_WAIT4END; | ||
426 | ccw->bits |= CCW_HALT_ON_TERM; | ||
427 | ccw->bits |= CCW_TERM_FLUSH; | ||
428 | ccw->bits |= BF_CCW(sg_len, PIO_NUM); | ||
429 | ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); | ||
430 | } else { | ||
431 | for_each_sg(sgl, sg, sg_len, i) { | ||
432 | if (sg->length > MAX_XFER_BYTES) { | ||
433 | dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", | ||
434 | sg->length, MAX_XFER_BYTES); | ||
435 | goto err_out; | ||
436 | } | ||
437 | |||
438 | ccw = &mxs_chan->ccw[idx++]; | ||
439 | |||
440 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; | ||
441 | ccw->bufaddr = sg->dma_address; | ||
442 | ccw->xfer_bytes = sg->length; | ||
443 | |||
444 | ccw->bits = 0; | ||
445 | ccw->bits |= CCW_CHAIN; | ||
446 | ccw->bits |= CCW_HALT_ON_TERM; | ||
447 | ccw->bits |= CCW_TERM_FLUSH; | ||
448 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | ||
449 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, | ||
450 | COMMAND); | ||
451 | |||
452 | if (i + 1 == sg_len) { | ||
453 | ccw->bits &= ~CCW_CHAIN; | ||
454 | ccw->bits |= CCW_IRQ; | ||
455 | ccw->bits |= CCW_DEC_SEM; | ||
456 | ccw->bits |= CCW_WAIT4END; | ||
457 | } | ||
458 | } | ||
459 | } | ||
460 | |||
461 | return &mxs_chan->desc; | ||
462 | |||
463 | err_out: | ||
464 | mxs_chan->status = DMA_ERROR; | ||
465 | return NULL; | ||
466 | } | ||
467 | |||
468 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | ||
469 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
470 | size_t period_len, enum dma_data_direction direction) | ||
471 | { | ||
472 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
473 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
474 | int num_periods = buf_len / period_len; | ||
475 | int i = 0, buf = 0; | ||
476 | |||
477 | if (mxs_chan->status == DMA_IN_PROGRESS) | ||
478 | return NULL; | ||
479 | |||
480 | mxs_chan->status = DMA_IN_PROGRESS; | ||
481 | mxs_chan->flags |= MXS_DMA_SG_LOOP; | ||
482 | |||
483 | if (num_periods > NUM_CCW) { | ||
484 | dev_err(mxs_dma->dma_device.dev, | ||
485 | "maximum number of sg exceeded: %d > %d\n", | ||
486 | num_periods, NUM_CCW); | ||
487 | goto err_out; | ||
488 | } | ||
489 | |||
490 | if (period_len > MAX_XFER_BYTES) { | ||
491 | dev_err(mxs_dma->dma_device.dev, | ||
492 | "maximum period size exceeded: %d > %d\n", | ||
493 | period_len, MAX_XFER_BYTES); | ||
494 | goto err_out; | ||
495 | } | ||
496 | |||
497 | while (buf < buf_len) { | ||
498 | struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; | ||
499 | |||
500 | if (i + 1 == num_periods) | ||
501 | ccw->next = mxs_chan->ccw_phys; | ||
502 | else | ||
503 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); | ||
504 | |||
505 | ccw->bufaddr = dma_addr; | ||
506 | ccw->xfer_bytes = period_len; | ||
507 | |||
508 | ccw->bits = 0; | ||
509 | ccw->bits |= CCW_CHAIN; | ||
510 | ccw->bits |= CCW_IRQ; | ||
511 | ccw->bits |= CCW_HALT_ON_TERM; | ||
512 | ccw->bits |= CCW_TERM_FLUSH; | ||
513 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | ||
514 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | ||
515 | |||
516 | dma_addr += period_len; | ||
517 | buf += period_len; | ||
518 | |||
519 | i++; | ||
520 | } | ||
521 | |||
522 | return &mxs_chan->desc; | ||
523 | |||
524 | err_out: | ||
525 | mxs_chan->status = DMA_ERROR; | ||
526 | return NULL; | ||
527 | } | ||
528 | |||
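The cyclic path above is what an audio-style client uses: one buffer is split into equal periods, the CCW chain wraps back onto itself, and an interrupt fires once per period. A hedged usage sketch (the names and the playback direction are assumptions, not taken from this driver):

	#include <linux/dmaengine.h>

	static void my_period_done(void *param)
	{
		/* runs from the channel tasklet once per completed period */
	}

	static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
	{
		struct dma_async_tx_descriptor *desc;
		dma_cookie_t cookie;

		desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
							    period_len, DMA_TO_DEVICE);
		if (!desc)
			return -EINVAL;

		desc->callback = my_period_done;
		desc->callback_param = NULL;

		/* for mxs-dma, tx_submit() also starts the channel */
		cookie = desc->tx_submit(desc);
		return dma_submit_error(cookie) ? -EIO : 0;
	}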
529 | static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
530 | unsigned long arg) | ||
531 | { | ||
532 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
533 | int ret = 0; | ||
534 | |||
535 | switch (cmd) { | ||
536 | case DMA_TERMINATE_ALL: | ||
537 | mxs_dma_disable_chan(mxs_chan); | ||
538 | break; | ||
539 | case DMA_PAUSE: | ||
540 | mxs_dma_pause_chan(mxs_chan); | ||
541 | break; | ||
542 | case DMA_RESUME: | ||
543 | mxs_dma_resume_chan(mxs_chan); | ||
544 | break; | ||
545 | default: | ||
546 | ret = -ENOSYS; | ||
547 | } | ||
548 | |||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | ||
553 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
554 | { | ||
555 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
556 | dma_cookie_t last_used; | ||
557 | |||
558 | last_used = chan->cookie; | ||
559 | dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); | ||
560 | |||
561 | return mxs_chan->status; | ||
562 | } | ||
563 | |||
564 | static void mxs_dma_issue_pending(struct dma_chan *chan) | ||
565 | { | ||
566 | /* | ||
567 | * Nothing to do. We only have a single descriptor. | ||
568 | */ | ||
569 | } | ||
570 | |||
571 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | ||
572 | { | ||
573 | int ret; | ||
574 | |||
575 | ret = clk_enable(mxs_dma->clk); | ||
576 | if (ret) | ||
577 | goto err_out; | ||
578 | |||
579 | ret = mxs_reset_block(mxs_dma->base); | ||
580 | if (ret) | ||
581 | goto err_out; | ||
582 | |||
583 | /* only major version matters */ | ||
584 | mxs_dma->version = readl(mxs_dma->base + | ||
585 | ((mxs_dma->dev_id == MXS_DMA_APBX) ? | ||
586 | HW_APBX_VERSION : HW_APBH_VERSION)) >> | ||
587 | BP_APBHX_VERSION_MAJOR; | ||
588 | |||
589 | /* enable apbh burst */ | ||
590 | if (dma_is_apbh()) { | ||
591 | writel(BM_APBH_CTRL0_APB_BURST_EN, | ||
592 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
593 | writel(BM_APBH_CTRL0_APB_BURST8_EN, | ||
594 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
595 | } | ||
596 | |||
597 | /* enable irq for all the channels */ | ||
598 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, | ||
599 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); | ||
600 | |||
601 | clk_disable(mxs_dma->clk); | ||
602 | |||
603 | return 0; | ||
604 | |||
605 | err_out: | ||
606 | return ret; | ||
607 | } | ||
608 | |||
609 | static int __init mxs_dma_probe(struct platform_device *pdev) | ||
610 | { | ||
611 | const struct platform_device_id *id_entry = | ||
612 | platform_get_device_id(pdev); | ||
613 | struct mxs_dma_engine *mxs_dma; | ||
614 | struct resource *iores; | ||
615 | int ret, i; | ||
616 | |||
617 | mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL); | ||
618 | if (!mxs_dma) | ||
619 | return -ENOMEM; | ||
620 | |||
621 | mxs_dma->dev_id = id_entry->driver_data; | ||
622 | |||
623 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
624 | |||
625 | if (!request_mem_region(iores->start, resource_size(iores), | ||
626 | pdev->name)) { | ||
627 | ret = -EBUSY; | ||
628 | goto err_request_region; | ||
629 | } | ||
630 | |||
631 | mxs_dma->base = ioremap(iores->start, resource_size(iores)); | ||
632 | if (!mxs_dma->base) { | ||
633 | ret = -ENOMEM; | ||
634 | goto err_ioremap; | ||
635 | } | ||
636 | |||
637 | mxs_dma->clk = clk_get(&pdev->dev, NULL); | ||
638 | if (IS_ERR(mxs_dma->clk)) { | ||
639 | ret = PTR_ERR(mxs_dma->clk); | ||
640 | goto err_clk; | ||
641 | } | ||
642 | |||
643 | dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); | ||
644 | dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); | ||
645 | |||
646 | INIT_LIST_HEAD(&mxs_dma->dma_device.channels); | ||
647 | |||
648 | /* Initialize channel parameters */ | ||
649 | for (i = 0; i < MXS_DMA_CHANNELS; i++) { | ||
650 | struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; | ||
651 | |||
652 | mxs_chan->mxs_dma = mxs_dma; | ||
653 | mxs_chan->chan.device = &mxs_dma->dma_device; | ||
654 | |||
655 | tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, | ||
656 | (unsigned long) mxs_chan); | ||
657 | |||
658 | |||
659 | /* Add the channel to mxs_chan list */ | ||
660 | list_add_tail(&mxs_chan->chan.device_node, | ||
661 | &mxs_dma->dma_device.channels); | ||
662 | } | ||
663 | |||
664 | ret = mxs_dma_init(mxs_dma); | ||
665 | if (ret) | ||
666 | goto err_init; | ||
667 | |||
668 | mxs_dma->dma_device.dev = &pdev->dev; | ||
669 | |||
670 | /* mxs_dma supports a maximum sg segment size of MAX_XFER_BYTES (0xff00) */ | ||
671 | mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms; | ||
672 | dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); | ||
673 | |||
674 | mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; | ||
675 | mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; | ||
676 | mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; | ||
677 | mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; | ||
678 | mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; | ||
679 | mxs_dma->dma_device.device_control = mxs_dma_control; | ||
680 | mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; | ||
681 | |||
682 | ret = dma_async_device_register(&mxs_dma->dma_device); | ||
683 | if (ret) { | ||
684 | dev_err(mxs_dma->dma_device.dev, "unable to register\n"); | ||
685 | goto err_init; | ||
686 | } | ||
687 | |||
688 | dev_info(mxs_dma->dma_device.dev, "initialized\n"); | ||
689 | |||
690 | return 0; | ||
691 | |||
692 | err_init: | ||
693 | clk_put(mxs_dma->clk); | ||
694 | err_clk: | ||
695 | iounmap(mxs_dma->base); | ||
696 | err_ioremap: | ||
697 | release_mem_region(iores->start, resource_size(iores)); | ||
698 | err_request_region: | ||
699 | kfree(mxs_dma); | ||
700 | return ret; | ||
701 | } | ||
702 | |||
703 | static struct platform_device_id mxs_dma_type[] = { | ||
704 | { | ||
705 | .name = "mxs-dma-apbh", | ||
706 | .driver_data = MXS_DMA_APBH, | ||
707 | }, { | ||
708 | .name = "mxs-dma-apbx", | ||
709 | .driver_data = MXS_DMA_APBX, | ||
710 | } | ||
711 | }; | ||
712 | |||
713 | static struct platform_driver mxs_dma_driver = { | ||
714 | .driver = { | ||
715 | .name = "mxs-dma", | ||
716 | }, | ||
717 | .id_table = mxs_dma_type, | ||
718 | }; | ||
719 | |||
720 | static int __init mxs_dma_module_init(void) | ||
721 | { | ||
722 | return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); | ||
723 | } | ||
724 | subsys_initcall(mxs_dma_module_init); | ||
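Since mxs_dma_alloc_chan_resources() requires chan->private to point at a struct mxs_dma_data carrying the per-channel IRQ, clients are expected to obtain channels through dma_request_channel() with a filter that plants that pointer. A hypothetical sketch (the filter policy and names are assumptions, not part of this patch):

	#include <linux/dmaengine.h>
	#include <mach/dma.h>

	static bool my_dma_filter(struct dma_chan *chan, void *param)
	{
		/* accept the channel and hand it the platform data it needs */
		chan->private = param;
		return true;
	}

	static struct dma_chan *my_get_channel(int chan_irq)
	{
		static struct mxs_dma_data data;
		dma_cap_mask_t mask;

		data.chan_irq = chan_irq;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, my_dma_filter, &data);
	}

A real filter would also check which controller instance and channel number it was offered before accepting it.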
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 1c38418ae61f..8d8fef1480a9 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -82,7 +82,7 @@ struct pch_dma_regs { | |||
82 | u32 dma_sts1; | 82 | u32 dma_sts1; |
83 | u32 reserved2; | 83 | u32 reserved2; |
84 | u32 reserved3; | 84 | u32 reserved3; |
85 | struct pch_dma_desc_regs desc[0]; | 85 | struct pch_dma_desc_regs desc[MAX_CHAN_NR]; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct pch_dma_desc { | 88 | struct pch_dma_desc { |
@@ -124,7 +124,7 @@ struct pch_dma { | |||
124 | struct pci_pool *pool; | 124 | struct pci_pool *pool; |
125 | struct pch_dma_regs regs; | 125 | struct pch_dma_regs regs; |
126 | struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; | 126 | struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; |
127 | struct pch_dma_chan channels[0]; | 127 | struct pch_dma_chan channels[MAX_CHAN_NR]; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | #define PCH_DMA_CTL0 0x00 | 130 | #define PCH_DMA_CTL0 0x00 |
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
366 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); | 366 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); |
367 | dma_cookie_t cookie; | 367 | dma_cookie_t cookie; |
368 | 368 | ||
369 | spin_lock_bh(&pd_chan->lock); | 369 | spin_lock(&pd_chan->lock); |
370 | cookie = pdc_assign_cookie(pd_chan, desc); | 370 | cookie = pdc_assign_cookie(pd_chan, desc); |
371 | 371 | ||
372 | if (list_empty(&pd_chan->active_list)) { | 372 | if (list_empty(&pd_chan->active_list)) { |
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
376 | list_add_tail(&desc->desc_node, &pd_chan->queue); | 376 | list_add_tail(&desc->desc_node, &pd_chan->queue); |
377 | } | 377 | } |
378 | 378 | ||
379 | spin_unlock_bh(&pd_chan->lock); | 379 | spin_unlock(&pd_chan->lock); |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) | |||
386 | struct pch_dma *pd = to_pd(chan->device); | 386 | struct pch_dma *pd = to_pd(chan->device); |
387 | dma_addr_t addr; | 387 | dma_addr_t addr; |
388 | 388 | ||
389 | desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); | 389 | desc = pci_pool_alloc(pd->pool, flags, &addr); |
390 | if (desc) { | 390 | if (desc) { |
391 | memset(desc, 0, sizeof(struct pch_dma_desc)); | 391 | memset(desc, 0, sizeof(struct pch_dma_desc)); |
392 | INIT_LIST_HEAD(&desc->tx_list); | 392 | INIT_LIST_HEAD(&desc->tx_list); |
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
405 | struct pch_dma_desc *ret = NULL; | 405 | struct pch_dma_desc *ret = NULL; |
406 | int i; | 406 | int i; |
407 | 407 | ||
408 | spin_lock_bh(&pd_chan->lock); | 408 | spin_lock(&pd_chan->lock); |
409 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { | 409 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { |
410 | i++; | 410 | i++; |
411 | if (async_tx_test_ack(&desc->txd)) { | 411 | if (async_tx_test_ack(&desc->txd)) { |
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
415 | } | 415 | } |
416 | dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); | 416 | dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); |
417 | } | 417 | } |
418 | spin_unlock_bh(&pd_chan->lock); | 418 | spin_unlock(&pd_chan->lock); |
419 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); | 419 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); |
420 | 420 | ||
421 | if (!ret) { | 421 | if (!ret) { |
422 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); | 422 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); |
423 | if (ret) { | 423 | if (ret) { |
424 | spin_lock_bh(&pd_chan->lock); | 424 | spin_lock(&pd_chan->lock); |
425 | pd_chan->descs_allocated++; | 425 | pd_chan->descs_allocated++; |
426 | spin_unlock_bh(&pd_chan->lock); | 426 | spin_unlock(&pd_chan->lock); |
427 | } else { | 427 | } else { |
428 | dev_err(chan2dev(&pd_chan->chan), | 428 | dev_err(chan2dev(&pd_chan->chan), |
429 | "failed to alloc desc\n"); | 429 | "failed to alloc desc\n"); |
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan, | |||
437 | struct pch_dma_desc *desc) | 437 | struct pch_dma_desc *desc) |
438 | { | 438 | { |
439 | if (desc) { | 439 | if (desc) { |
440 | spin_lock_bh(&pd_chan->lock); | 440 | spin_lock(&pd_chan->lock); |
441 | list_splice_init(&desc->tx_list, &pd_chan->free_list); | 441 | list_splice_init(&desc->tx_list, &pd_chan->free_list); |
442 | list_add(&desc->desc_node, &pd_chan->free_list); | 442 | list_add(&desc->desc_node, &pd_chan->free_list); |
443 | spin_unlock_bh(&pd_chan->lock); | 443 | spin_unlock(&pd_chan->lock); |
444 | } | 444 | } |
445 | } | 445 | } |
446 | 446 | ||
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
530 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 530 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
531 | 531 | ||
532 | if (pdc_is_idle(pd_chan)) { | 532 | if (pdc_is_idle(pd_chan)) { |
533 | spin_lock_bh(&pd_chan->lock); | 533 | spin_lock(&pd_chan->lock); |
534 | pdc_advance_work(pd_chan); | 534 | pdc_advance_work(pd_chan); |
535 | spin_unlock_bh(&pd_chan->lock); | 535 | spin_unlock(&pd_chan->lock); |
536 | } | 536 | } |
537 | } | 537 | } |
538 | 538 | ||
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
592 | goto err_desc_get; | 592 | goto err_desc_get; |
593 | } | 593 | } |
594 | 594 | ||
595 | |||
596 | if (!first) { | 595 | if (!first) { |
597 | first = desc; | 596 | first = desc; |
598 | } else { | 597 | } else { |
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
641 | 640 | ||
642 | spin_unlock_bh(&pd_chan->lock); | 641 | spin_unlock_bh(&pd_chan->lock); |
643 | 642 | ||
644 | |||
645 | return 0; | 643 | return 0; |
646 | } | 644 | } |
647 | 645 | ||
648 | static void pdc_tasklet(unsigned long data) | 646 | static void pdc_tasklet(unsigned long data) |
649 | { | 647 | { |
650 | struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; | 648 | struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; |
649 | unsigned long flags; | ||
651 | 650 | ||
652 | if (!pdc_is_idle(pd_chan)) { | 651 | if (!pdc_is_idle(pd_chan)) { |
653 | dev_err(chan2dev(&pd_chan->chan), | 652 | dev_err(chan2dev(&pd_chan->chan), |
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data) | |||
655 | return; | 654 | return; |
656 | } | 655 | } |
657 | 656 | ||
658 | spin_lock_bh(&pd_chan->lock); | 657 | spin_lock_irqsave(&pd_chan->lock, flags); |
659 | if (test_and_clear_bit(0, &pd_chan->err_status)) | 658 | if (test_and_clear_bit(0, &pd_chan->err_status)) |
660 | pdc_handle_error(pd_chan); | 659 | pdc_handle_error(pd_chan); |
661 | else | 660 | else |
662 | pdc_advance_work(pd_chan); | 661 | pdc_advance_work(pd_chan); |
663 | spin_unlock_bh(&pd_chan->lock); | 662 | spin_unlock_irqrestore(&pd_chan->lock, flags); |
664 | } | 663 | } |
665 | 664 | ||
666 | static irqreturn_t pd_irq(int irq, void *devid) | 665 | static irqreturn_t pd_irq(int irq, void *devid) |
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid) | |||
694 | return ret; | 693 | return ret; |
695 | } | 694 | } |
696 | 695 | ||
696 | #ifdef CONFIG_PM | ||
697 | static void pch_dma_save_regs(struct pch_dma *pd) | 697 | static void pch_dma_save_regs(struct pch_dma *pd) |
698 | { | 698 | { |
699 | struct pch_dma_chan *pd_chan; | 699 | struct pch_dma_chan *pd_chan; |
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev) | |||
771 | 771 | ||
772 | return 0; | 772 | return 0; |
773 | } | 773 | } |
774 | #endif | ||
774 | 775 | ||
775 | static int __devinit pch_dma_probe(struct pci_dev *pdev, | 776 | static int __devinit pch_dma_probe(struct pci_dev *pdev, |
776 | const struct pci_device_id *id) | 777 | const struct pci_device_id *id) |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6e1d46a65d0e..af955de035f4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -68,6 +68,7 @@ enum d40_command { | |||
68 | * @base: Pointer to memory area when the pre_alloc_lli's are not large | 68 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
69 | * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if | 69 | * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if |
70 | * pre_alloc_lli is used. | 70 | * pre_alloc_lli is used. |
71 | * @dma_addr: DMA address, if mapped | ||
71 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. | 72 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. |
72 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, | 73 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, |
73 | * one buffer to one buffer. | 74 | * one buffer to one buffer. |
@@ -75,6 +76,7 @@ enum d40_command { | |||
75 | struct d40_lli_pool { | 76 | struct d40_lli_pool { |
76 | void *base; | 77 | void *base; |
77 | int size; | 78 | int size; |
79 | dma_addr_t dma_addr; | ||
78 | /* Space for dst and src, plus an extra for padding */ | 80 | /* Space for dst and src, plus an extra for padding */ |
79 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 81 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
80 | }; | 82 | }; |
@@ -94,7 +96,6 @@ struct d40_lli_pool { | |||
94 | * during a transfer. | 96 | * during a transfer. |
95 | * @node: List entry. | 97 | * @node: List entry. |
96 | * @is_in_client_list: true if the client owns this descriptor. | 98 | * @is_in_client_list: true if the client owns this descriptor. |
97 | * @is_hw_linked: true if this job will automatically be continued for | ||
98 | * the previous one. | 99 | * the previous one. |
99 | * | 100 | * |
100 | * This descriptor is used for both logical and physical transfers. | 101 | * This descriptor is used for both logical and physical transfers. |
@@ -114,7 +115,7 @@ struct d40_desc { | |||
114 | struct list_head node; | 115 | struct list_head node; |
115 | 116 | ||
116 | bool is_in_client_list; | 117 | bool is_in_client_list; |
117 | bool is_hw_linked; | 118 | bool cyclic; |
118 | }; | 119 | }; |
119 | 120 | ||
120 | /** | 121 | /** |
@@ -130,6 +131,7 @@ struct d40_desc { | |||
130 | */ | 131 | */ |
131 | struct d40_lcla_pool { | 132 | struct d40_lcla_pool { |
132 | void *base; | 133 | void *base; |
134 | dma_addr_t dma_addr; | ||
133 | void *base_unaligned; | 135 | void *base_unaligned; |
134 | int pages; | 136 | int pages; |
135 | spinlock_t lock; | 137 | spinlock_t lock; |
@@ -303,9 +305,37 @@ struct d40_reg_val { | |||
303 | unsigned int val; | 305 | unsigned int val; |
304 | }; | 306 | }; |
305 | 307 | ||
306 | static int d40_pool_lli_alloc(struct d40_desc *d40d, | 308 | static struct device *chan2dev(struct d40_chan *d40c) |
307 | int lli_len, bool is_log) | ||
308 | { | 309 | { |
310 | return &d40c->chan.dev->device; | ||
311 | } | ||
312 | |||
313 | static bool chan_is_physical(struct d40_chan *chan) | ||
314 | { | ||
315 | return chan->log_num == D40_PHY_CHAN; | ||
316 | } | ||
317 | |||
318 | static bool chan_is_logical(struct d40_chan *chan) | ||
319 | { | ||
320 | return !chan_is_physical(chan); | ||
321 | } | ||
322 | |||
323 | static void __iomem *chan_base(struct d40_chan *chan) | ||
324 | { | ||
325 | return chan->base->virtbase + D40_DREG_PCBASE + | ||
326 | chan->phy_chan->num * D40_DREG_PCDELTA; | ||
327 | } | ||
328 | |||
329 | #define d40_err(dev, format, arg...) \ | ||
330 | dev_err(dev, "[%s] " format, __func__, ## arg) | ||
331 | |||
332 | #define chan_err(d40c, format, arg...) \ | ||
333 | d40_err(chan2dev(d40c), format, ## arg) | ||
334 | |||
335 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, | ||
336 | int lli_len) | ||
337 | { | ||
338 | bool is_log = chan_is_logical(d40c); | ||
309 | u32 align; | 339 | u32 align; |
310 | void *base; | 340 | void *base; |
311 | 341 | ||
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
319 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | 349 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); |
320 | d40d->lli_pool.base = NULL; | 350 | d40d->lli_pool.base = NULL; |
321 | } else { | 351 | } else { |
322 | d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); | 352 | d40d->lli_pool.size = lli_len * 2 * align; |
323 | 353 | ||
324 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | 354 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); |
325 | d40d->lli_pool.base = base; | 355 | d40d->lli_pool.base = base; |
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
329 | } | 359 | } |
330 | 360 | ||
331 | if (is_log) { | 361 | if (is_log) { |
332 | d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, | 362 | d40d->lli_log.src = PTR_ALIGN(base, align); |
333 | align); | 363 | d40d->lli_log.dst = d40d->lli_log.src + lli_len; |
334 | d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, | 364 | |
335 | align); | 365 | d40d->lli_pool.dma_addr = 0; |
336 | } else { | 366 | } else { |
337 | d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, | 367 | d40d->lli_phy.src = PTR_ALIGN(base, align); |
338 | align); | 368 | d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; |
339 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, | 369 | |
340 | align); | 370 | d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, |
371 | d40d->lli_phy.src, | ||
372 | d40d->lli_pool.size, | ||
373 | DMA_TO_DEVICE); | ||
374 | |||
375 | if (dma_mapping_error(d40c->base->dev, | ||
376 | d40d->lli_pool.dma_addr)) { | ||
377 | kfree(d40d->lli_pool.base); | ||
378 | d40d->lli_pool.base = NULL; | ||
379 | d40d->lli_pool.dma_addr = 0; | ||
380 | return -ENOMEM; | ||
381 | } | ||
341 | } | 382 | } |
342 | 383 | ||
343 | return 0; | 384 | return 0; |
344 | } | 385 | } |
345 | 386 | ||
346 | static void d40_pool_lli_free(struct d40_desc *d40d) | 387 | static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) |
347 | { | 388 | { |
389 | if (d40d->lli_pool.dma_addr) | ||
390 | dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, | ||
391 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
392 | |||
348 | kfree(d40d->lli_pool.base); | 393 | kfree(d40d->lli_pool.base); |
349 | d40d->lli_pool.base = NULL; | 394 | d40d->lli_pool.base = NULL; |
350 | d40d->lli_pool.size = 0; | 395 | d40d->lli_pool.size = 0; |
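The lli_pool changes above give the physical LLI table a streaming DMA mapping that lives with the pool: it is mapped once when the pool is allocated, checked with dma_mapping_error(), synced before the hardware reads it, and unmapped before the backing memory is freed. A minimal sketch of that general pattern, using hypothetical names rather than the driver's own:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Map a CPU-built descriptor table so the DMA controller can fetch it. */
	static int map_table_for_device(struct device *dev, void *table, size_t size,
					dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;
		return 0;
	}

	/* After the CPU touches part of the table, push that range out to the device. */
	static void publish_table_range(struct device *dev, dma_addr_t handle,
					unsigned long offset, size_t len)
	{
		dma_sync_single_range_for_device(dev, handle, offset, len,
						 DMA_TO_DEVICE);
	}

	/* Unmap before freeing the backing memory. */
	static void unmap_table(struct device *dev, dma_addr_t handle, size_t size)
	{
		dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
	}

This is the same map/sync/unmap sequence that the new lli_pool.dma_addr handling follows in the hunks below.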
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c, | |||
391 | int i; | 436 | int i; |
392 | int ret = -EINVAL; | 437 | int ret = -EINVAL; |
393 | 438 | ||
394 | if (d40c->log_num == D40_PHY_CHAN) | 439 | if (chan_is_physical(d40c)) |
395 | return 0; | 440 | return 0; |
396 | 441 | ||
397 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 442 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
430 | 475 | ||
431 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 476 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
432 | if (async_tx_test_ack(&d->txd)) { | 477 | if (async_tx_test_ack(&d->txd)) { |
433 | d40_pool_lli_free(d); | 478 | d40_pool_lli_free(d40c, d); |
434 | d40_desc_remove(d); | 479 | d40_desc_remove(d); |
435 | desc = d; | 480 | desc = d; |
436 | memset(desc, 0, sizeof(*desc)); | 481 | memset(desc, 0, sizeof(*desc)); |
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
450 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 495 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
451 | { | 496 | { |
452 | 497 | ||
498 | d40_pool_lli_free(d40c, d40d); | ||
453 | d40_lcla_free_all(d40c, d40d); | 499 | d40_lcla_free_all(d40c, d40d); |
454 | kmem_cache_free(d40c->base->desc_slab, d40d); | 500 | kmem_cache_free(d40c->base->desc_slab, d40d); |
455 | } | 501 | } |
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | |||
459 | list_add_tail(&desc->node, &d40c->active); | 505 | list_add_tail(&desc->node, &d40c->active); |
460 | } | 506 | } |
461 | 507 | ||
462 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 508 | static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) |
463 | { | 509 | { |
464 | int curr_lcla = -EINVAL, next_lcla; | 510 | struct d40_phy_lli *lli_dst = desc->lli_phy.dst; |
511 | struct d40_phy_lli *lli_src = desc->lli_phy.src; | ||
512 | void __iomem *base = chan_base(chan); | ||
513 | |||
514 | writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); | ||
515 | writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); | ||
516 | writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); | ||
517 | writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); | ||
518 | |||
519 | writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); | ||
520 | writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); | ||
521 | writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); | ||
522 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); | ||
523 | } | ||
465 | 524 | ||
466 | if (d40c->log_num == D40_PHY_CHAN) { | 525 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) |
467 | d40_phy_lli_write(d40c->base->virtbase, | 526 | { |
468 | d40c->phy_chan->num, | 527 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; |
469 | d40d->lli_phy.dst, | 528 | struct d40_log_lli_bidir *lli = &desc->lli_log; |
470 | d40d->lli_phy.src); | 529 | int lli_current = desc->lli_current; |
471 | d40d->lli_current = d40d->lli_len; | 530 | int lli_len = desc->lli_len; |
472 | } else { | 531 | bool cyclic = desc->cyclic; |
532 | int curr_lcla = -EINVAL; | ||
533 | int first_lcla = 0; | ||
534 | bool linkback; | ||
473 | 535 | ||
474 | if ((d40d->lli_len - d40d->lli_current) > 1) | 536 | /* |
475 | curr_lcla = d40_lcla_alloc_one(d40c, d40d); | 537 | * We may have partially running cyclic transfers, in case we didn't get |
538 | * enough LCLA entries. | ||
539 | */ | ||
540 | linkback = cyclic && lli_current == 0; | ||
476 | 541 | ||
477 | d40_log_lli_lcpa_write(d40c->lcpa, | 542 | /* |
478 | &d40d->lli_log.dst[d40d->lli_current], | 543 | * For linkback, we need one LCLA even with only one link, because we |
479 | &d40d->lli_log.src[d40d->lli_current], | 544 | * can't link back to the one in LCPA space |
480 | curr_lcla); | 545 | */ |
546 | if (linkback || (lli_len - lli_current > 1)) { | ||
547 | curr_lcla = d40_lcla_alloc_one(chan, desc); | ||
548 | first_lcla = curr_lcla; | ||
549 | } | ||
481 | 550 | ||
482 | d40d->lli_current++; | 551 | /* |
483 | for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { | 552 | * For linkback, we normally load the LCPA in the loop since we need to |
484 | struct d40_log_lli *lcla; | 553 | * link it to the second LCLA and not the first. However, if we |
554 | * couldn't even get a first LCLA, then we have to run in LCPA and | ||
555 | * reload manually. | ||
556 | */ | ||
557 | if (!linkback || curr_lcla == -EINVAL) { | ||
558 | unsigned int flags = 0; | ||
485 | 559 | ||
486 | if (d40d->lli_current + 1 < d40d->lli_len) | 560 | if (curr_lcla == -EINVAL) |
487 | next_lcla = d40_lcla_alloc_one(d40c, d40d); | 561 | flags |= LLI_TERM_INT; |
488 | else | ||
489 | next_lcla = -EINVAL; | ||
490 | 562 | ||
491 | lcla = d40c->base->lcla_pool.base + | 563 | d40_log_lli_lcpa_write(chan->lcpa, |
492 | d40c->phy_chan->num * 1024 + | 564 | &lli->dst[lli_current], |
493 | 8 * curr_lcla * 2; | 565 | &lli->src[lli_current], |
566 | curr_lcla, | ||
567 | flags); | ||
568 | lli_current++; | ||
569 | } | ||
494 | 570 | ||
495 | d40_log_lli_lcla_write(lcla, | 571 | if (curr_lcla < 0) |
496 | &d40d->lli_log.dst[d40d->lli_current], | 572 | goto out; |
497 | &d40d->lli_log.src[d40d->lli_current], | ||
498 | next_lcla); | ||
499 | 573 | ||
500 | (void) dma_map_single(d40c->base->dev, lcla, | 574 | for (; lli_current < lli_len; lli_current++) { |
501 | 2 * sizeof(struct d40_log_lli), | 575 | unsigned int lcla_offset = chan->phy_chan->num * 1024 + |
502 | DMA_TO_DEVICE); | 576 | 8 * curr_lcla * 2; |
577 | struct d40_log_lli *lcla = pool->base + lcla_offset; | ||
578 | unsigned int flags = 0; | ||
579 | int next_lcla; | ||
503 | 580 | ||
504 | curr_lcla = next_lcla; | 581 | if (lli_current + 1 < lli_len) |
582 | next_lcla = d40_lcla_alloc_one(chan, desc); | ||
583 | else | ||
584 | next_lcla = linkback ? first_lcla : -EINVAL; | ||
505 | 585 | ||
506 | if (curr_lcla == -EINVAL) { | 586 | if (cyclic || next_lcla == -EINVAL) |
507 | d40d->lli_current++; | 587 | flags |= LLI_TERM_INT; |
508 | break; | 588 | |
509 | } | 589 | if (linkback && curr_lcla == first_lcla) { |
590 | /* First link goes in both LCPA and LCLA */ | ||
591 | d40_log_lli_lcpa_write(chan->lcpa, | ||
592 | &lli->dst[lli_current], | ||
593 | &lli->src[lli_current], | ||
594 | next_lcla, flags); | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * One LCLA slot goes unused in the cyclic case if the very first | ||
599 | * next_lcla allocation fails. | ||
600 | */ | ||
601 | d40_log_lli_lcla_write(lcla, | ||
602 | &lli->dst[lli_current], | ||
603 | &lli->src[lli_current], | ||
604 | next_lcla, flags); | ||
605 | |||
606 | dma_sync_single_range_for_device(chan->base->dev, | ||
607 | pool->dma_addr, lcla_offset, | ||
608 | 2 * sizeof(struct d40_log_lli), | ||
609 | DMA_TO_DEVICE); | ||
510 | 610 | ||
611 | curr_lcla = next_lcla; | ||
612 | |||
613 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | ||
614 | lli_current++; | ||
615 | break; | ||
511 | } | 616 | } |
512 | } | 617 | } |
618 | |||
619 | out: | ||
620 | desc->lli_current = lli_current; | ||
621 | } | ||
622 | |||
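The linkback handling in d40_log_lli_to_lcxa() above comes down to two small pieces of arithmetic: deciding whether the first link needs an LCLA entry at all, and locating a link's slot inside the per-channel LCLA area. A distilled, stand-alone model of both (function names are illustrative, not part of the driver):

	#include <linux/types.h>

	/*
	 * An LCLA entry is needed before the first link is loaded when more
	 * than one link remains, or when the job is cyclic: a cyclic job must
	 * be able to link back, and it can never link back to the slot that
	 * lives in LCPA space.
	 */
	static bool first_link_needs_lcla(bool cyclic, int lli_current, int lli_len)
	{
		bool linkback = cyclic && lli_current == 0;

		return linkback || (lli_len - lli_current > 1);
	}

	/*
	 * Byte offset of an LCLA slot: each physical channel owns 1024 bytes
	 * of LCLA space, and every link stores a src/dst pair of 8-byte log
	 * LLIs, i.e. 16 bytes per slot (the "num * 1024 + 8 * lcla * 2" above).
	 */
	static unsigned int lcla_slot_offset(unsigned int phy_chan, unsigned int slot)
	{
		return phy_chan * 1024 + 8 * slot * 2;
	}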
623 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | ||
624 | { | ||
625 | if (chan_is_physical(d40c)) { | ||
626 | d40_phy_lli_load(d40c, d40d); | ||
627 | d40d->lli_current = d40d->lli_len; | ||
628 | } else | ||
629 | d40_log_lli_to_lcxa(d40c, d40d); | ||
513 | } | 630 | } |
514 | 631 | ||
515 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | 632 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) |
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
543 | return d; | 660 | return d; |
544 | } | 661 | } |
545 | 662 | ||
546 | static struct d40_desc *d40_last_queued(struct d40_chan *d40c) | ||
547 | { | ||
548 | struct d40_desc *d; | ||
549 | |||
550 | if (list_empty(&d40c->queue)) | ||
551 | return NULL; | ||
552 | list_for_each_entry(d, &d40c->queue, node) | ||
553 | if (list_is_last(&d->node, &d40c->queue)) | ||
554 | break; | ||
555 | return d; | ||
556 | } | ||
557 | |||
558 | static int d40_psize_2_burst_size(bool is_log, int psize) | 663 | static int d40_psize_2_burst_size(bool is_log, int psize) |
559 | { | 664 | { |
560 | if (is_log) { | 665 | if (is_log) { |
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, | |||
666 | } | 771 | } |
667 | 772 | ||
668 | if (i == D40_SUSPEND_MAX_IT) { | 773 | if (i == D40_SUSPEND_MAX_IT) { |
669 | dev_err(&d40c->chan.dev->device, | 774 | chan_err(d40c, |
670 | "[%s]: unable to suspend the chl %d (log: %d) status %x\n", | 775 | "unable to suspend the chl %d (log: %d) status %x\n", |
671 | __func__, d40c->phy_chan->num, d40c->log_num, | 776 | d40c->phy_chan->num, d40c->log_num, |
672 | status); | 777 | status); |
673 | dump_stack(); | 778 | dump_stack(); |
674 | ret = -EBUSY; | 779 | ret = -EBUSY; |
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c) | |||
701 | d40c->busy = false; | 806 | d40c->busy = false; |
702 | } | 807 | } |
703 | 808 | ||
809 | static void __d40_config_set_event(struct d40_chan *d40c, bool enable, | ||
810 | u32 event, int reg) | ||
811 | { | ||
812 | void __iomem *addr = chan_base(d40c) + reg; | ||
813 | int tries; | ||
814 | |||
815 | if (!enable) { | ||
816 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
817 | | ~D40_EVENTLINE_MASK(event), addr); | ||
818 | return; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * The hardware sometimes doesn't register the enable when src and dst | ||
823 | * event lines are active on the same logical channel. Retry to ensure | ||
824 | * it does. Usually only one retry is sufficient. | ||
825 | */ | ||
826 | tries = 100; | ||
827 | while (--tries) { | ||
828 | writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
829 | | ~D40_EVENTLINE_MASK(event), addr); | ||
830 | |||
831 | if (readl(addr) & D40_EVENTLINE_MASK(event)) | ||
832 | break; | ||
833 | } | ||
834 | |||
835 | if (tries != 99) | ||
836 | dev_dbg(chan2dev(d40c), | ||
837 | "[%s] workaround enable S%cLNK (%d tries)\n", | ||
838 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | ||
839 | 100 - tries); | ||
840 | |||
841 | WARN_ON(!tries); | ||
842 | } | ||
843 | |||
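__d40_config_set_event() above wraps the enable write in a bounded retry loop because the hardware can drop the write when both event lines of a logical channel are active, so the register is read back until the event bit is actually set. Reduced to a generic helper, the idiom looks like this (a sketch, not driver code):

	#include <linux/io.h>
	#include <linux/errno.h>

	/*
	 * Write @val to @addr and read back until the bits in @mask are set,
	 * giving up after @retries attempts.  Returns the attempt number that
	 * succeeded, or -EIO if the write never took effect.
	 */
	static int writel_verified(u32 val, u32 mask, void __iomem *addr, int retries)
	{
		int attempt;

		for (attempt = 1; attempt <= retries; attempt++) {
			writel(val, addr);
			if (readl(addr) & mask)
				return attempt;
		}

		return -EIO;
	}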
704 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | 844 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) |
705 | { | 845 | { |
706 | u32 val; | ||
707 | unsigned long flags; | 846 | unsigned long flags; |
708 | 847 | ||
709 | /* Notice, that disable requires the physical channel to be stopped */ | ||
710 | if (do_enable) | ||
711 | val = D40_ACTIVATE_EVENTLINE; | ||
712 | else | ||
713 | val = D40_DEACTIVATE_EVENTLINE; | ||
714 | |||
715 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | 848 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); |
716 | 849 | ||
717 | /* Enable event line connected to device (or memcpy) */ | 850 | /* Enable event line connected to device (or memcpy) */ |
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
719 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 852 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { |
720 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 853 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
721 | 854 | ||
722 | writel((val << D40_EVENTLINE_POS(event)) | | 855 | __d40_config_set_event(d40c, do_enable, event, |
723 | ~D40_EVENTLINE_MASK(event), | 856 | D40_CHAN_REG_SSLNK); |
724 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
725 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
726 | D40_CHAN_REG_SSLNK); | ||
727 | } | 857 | } |
858 | |||
728 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | 859 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { |
729 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 860 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
730 | 861 | ||
731 | writel((val << D40_EVENTLINE_POS(event)) | | 862 | __d40_config_set_event(d40c, do_enable, event, |
732 | ~D40_EVENTLINE_MASK(event), | 863 | D40_CHAN_REG_SDLNK); |
733 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
734 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
735 | D40_CHAN_REG_SDLNK); | ||
736 | } | 864 | } |
737 | 865 | ||
738 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | 866 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); |
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
740 | 868 | ||
741 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 869 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
742 | { | 870 | { |
871 | void __iomem *chanbase = chan_base(d40c); | ||
743 | u32 val; | 872 | u32 val; |
744 | 873 | ||
745 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 874 | val = readl(chanbase + D40_CHAN_REG_SSLNK); |
746 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 875 | val |= readl(chanbase + D40_CHAN_REG_SDLNK); |
747 | D40_CHAN_REG_SSLNK); | ||
748 | 876 | ||
749 | val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
750 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
751 | D40_CHAN_REG_SDLNK); | ||
752 | return val; | 877 | return val; |
753 | } | 878 | } |
754 | 879 | ||
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c) | |||
771 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, | 896 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, |
772 | }; | 897 | }; |
773 | 898 | ||
774 | if (d40c->log_num == D40_PHY_CHAN) | 899 | if (chan_is_physical(d40c)) |
775 | return phy_map[d40c->dma_cfg.mode_opt]; | 900 | return phy_map[d40c->dma_cfg.mode_opt]; |
776 | else | 901 | else |
777 | return log_map[d40c->dma_cfg.mode_opt]; | 902 | return log_map[d40c->dma_cfg.mode_opt]; |
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c) | |||
785 | /* Odd addresses are even addresses + 4 */ | 910 | /* Odd addresses are even addresses + 4 */ |
786 | addr_base = (d40c->phy_chan->num % 2) * 4; | 911 | addr_base = (d40c->phy_chan->num % 2) * 4; |
787 | /* Setup channel mode to logical or physical */ | 912 | /* Setup channel mode to logical or physical */ |
788 | var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << | 913 | var = ((u32)(chan_is_logical(d40c)) + 1) << |
789 | D40_CHAN_POS(d40c->phy_chan->num); | 914 | D40_CHAN_POS(d40c->phy_chan->num); |
790 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | 915 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); |
791 | 916 | ||
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c) | |||
794 | 919 | ||
795 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | 920 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); |
796 | 921 | ||
797 | if (d40c->log_num != D40_PHY_CHAN) { | 922 | if (chan_is_logical(d40c)) { |
923 | int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) | ||
924 | & D40_SREG_ELEM_LOG_LIDX_MASK; | ||
925 | void __iomem *chanbase = chan_base(d40c); | ||
926 | |||
798 | /* Set default config for CFG reg */ | 927 | /* Set default config for CFG reg */ |
799 | writel(d40c->src_def_cfg, | 928 | writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); |
800 | d40c->base->virtbase + D40_DREG_PCBASE + | 929 | writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); |
801 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
802 | D40_CHAN_REG_SSCFG); | ||
803 | writel(d40c->dst_def_cfg, | ||
804 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
805 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
806 | D40_CHAN_REG_SDCFG); | ||
807 | 930 | ||
808 | /* Set LIDX for lcla */ | 931 | /* Set LIDX for lcla */ |
809 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 932 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); |
810 | D40_SREG_ELEM_LOG_LIDX_MASK, | 933 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); |
811 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
812 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
813 | D40_CHAN_REG_SDELT); | ||
814 | |||
815 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | ||
816 | D40_SREG_ELEM_LOG_LIDX_MASK, | ||
817 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
818 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
819 | D40_CHAN_REG_SSELT); | ||
820 | |||
821 | } | 934 | } |
822 | } | 935 | } |
823 | 936 | ||
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c) | |||
825 | { | 938 | { |
826 | u32 num_elt; | 939 | u32 num_elt; |
827 | 940 | ||
828 | if (d40c->log_num != D40_PHY_CHAN) | 941 | if (chan_is_logical(d40c)) |
829 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 942 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) |
830 | >> D40_MEM_LCSP2_ECNT_POS; | 943 | >> D40_MEM_LCSP2_ECNT_POS; |
831 | else | 944 | else { |
832 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | 945 | u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); |
833 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 946 | num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) |
834 | D40_CHAN_REG_SDELT) & | 947 | >> D40_SREG_ELEM_PHY_ECNT_POS; |
835 | D40_SREG_ELEM_PHY_ECNT_MASK) >> | 948 | } |
836 | D40_SREG_ELEM_PHY_ECNT_POS; | 949 | |
837 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 950 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); |
838 | } | 951 | } |
839 | 952 | ||
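The residue calculation above relies on the data_width field holding the log2 of the element size, so the remaining byte count is simply the outstanding element count shifted by that width: for example, 10 remaining elements at a 32-bit width (encoded as 2) leave 10 << 2 = 40 bytes. As a one-line helper, illustrative only:

	#include <linux/types.h>

	/* Remaining bytes from an element count and a log2(element size) field. */
	static inline u32 residue_bytes(u32 num_elt, u32 data_width_log2)
	{
		return num_elt * (1 << data_width_log2);
	}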
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) | |||
841 | { | 954 | { |
842 | bool is_link; | 955 | bool is_link; |
843 | 956 | ||
844 | if (d40c->log_num != D40_PHY_CHAN) | 957 | if (chan_is_logical(d40c)) |
845 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | 958 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; |
846 | else | 959 | else |
847 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 960 | is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) |
848 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 961 | & D40_SREG_LNK_PHYS_LNK_MASK; |
849 | D40_CHAN_REG_SDLNK) & | 962 | |
850 | D40_SREG_LNK_PHYS_LNK_MASK; | ||
851 | return is_link; | 963 | return is_link; |
852 | } | 964 | } |
853 | 965 | ||
854 | static int d40_pause(struct dma_chan *chan) | 966 | static int d40_pause(struct d40_chan *d40c) |
855 | { | 967 | { |
856 | struct d40_chan *d40c = | ||
857 | container_of(chan, struct d40_chan, chan); | ||
858 | int res = 0; | 968 | int res = 0; |
859 | unsigned long flags; | 969 | unsigned long flags; |
860 | 970 | ||
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan) | |||
865 | 975 | ||
866 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 976 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
867 | if (res == 0) { | 977 | if (res == 0) { |
868 | if (d40c->log_num != D40_PHY_CHAN) { | 978 | if (chan_is_logical(d40c)) { |
869 | d40_config_set_event(d40c, false); | 979 | d40_config_set_event(d40c, false); |
870 | /* Resume the other logical channels if any */ | 980 | /* Resume the other logical channels if any */ |
871 | if (d40_chan_has_events(d40c)) | 981 | if (d40_chan_has_events(d40c)) |
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan) | |||
878 | return res; | 988 | return res; |
879 | } | 989 | } |
880 | 990 | ||
881 | static int d40_resume(struct dma_chan *chan) | 991 | static int d40_resume(struct d40_chan *d40c) |
882 | { | 992 | { |
883 | struct d40_chan *d40c = | ||
884 | container_of(chan, struct d40_chan, chan); | ||
885 | int res = 0; | 993 | int res = 0; |
886 | unsigned long flags; | 994 | unsigned long flags; |
887 | 995 | ||
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan) | |||
891 | spin_lock_irqsave(&d40c->lock, flags); | 999 | spin_lock_irqsave(&d40c->lock, flags); |
892 | 1000 | ||
893 | if (d40c->base->rev == 0) | 1001 | if (d40c->base->rev == 0) |
894 | if (d40c->log_num != D40_PHY_CHAN) { | 1002 | if (chan_is_logical(d40c)) { |
895 | res = d40_channel_execute_command(d40c, | 1003 | res = d40_channel_execute_command(d40c, |
896 | D40_DMA_SUSPEND_REQ); | 1004 | D40_DMA_SUSPEND_REQ); |
897 | goto no_suspend; | 1005 | goto no_suspend; |
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan) | |||
900 | /* If bytes left to transfer or linked tx resume job */ | 1008 | /* If bytes left to transfer or linked tx resume job */ |
901 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 1009 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { |
902 | 1010 | ||
903 | if (d40c->log_num != D40_PHY_CHAN) | 1011 | if (chan_is_logical(d40c)) |
904 | d40_config_set_event(d40c, true); | 1012 | d40_config_set_event(d40c, true); |
905 | 1013 | ||
906 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1014 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
@@ -911,75 +1019,20 @@ no_suspend: | |||
911 | return res; | 1019 | return res; |
912 | } | 1020 | } |
913 | 1021 | ||
914 | static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) | 1022 | static int d40_terminate_all(struct d40_chan *chan) |
915 | { | 1023 | { |
916 | /* TODO: Write */ | 1024 | unsigned long flags; |
917 | } | 1025 | int ret = 0; |
918 | |||
919 | static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) | ||
920 | { | ||
921 | struct d40_desc *d40d_prev = NULL; | ||
922 | int i; | ||
923 | u32 val; | ||
924 | |||
925 | if (!list_empty(&d40c->queue)) | ||
926 | d40d_prev = d40_last_queued(d40c); | ||
927 | else if (!list_empty(&d40c->active)) | ||
928 | d40d_prev = d40_first_active_get(d40c); | ||
929 | |||
930 | if (!d40d_prev) | ||
931 | return; | ||
932 | |||
933 | /* Here we try to join this job with previous jobs */ | ||
934 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
935 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
936 | D40_CHAN_REG_SSLNK); | ||
937 | |||
938 | /* Figure out which link we're currently transmitting */ | ||
939 | for (i = 0; i < d40d_prev->lli_len; i++) | ||
940 | if (val == d40d_prev->lli_phy.src[i].reg_lnk) | ||
941 | break; | ||
942 | |||
943 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
944 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
945 | D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; | ||
946 | |||
947 | if (i == (d40d_prev->lli_len - 1) && val > 0) { | ||
948 | /* Change the current one */ | ||
949 | writel(virt_to_phys(d40d->lli_phy.src), | ||
950 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
951 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
952 | D40_CHAN_REG_SSLNK); | ||
953 | writel(virt_to_phys(d40d->lli_phy.dst), | ||
954 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
955 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
956 | D40_CHAN_REG_SDLNK); | ||
957 | |||
958 | d40d->is_hw_linked = true; | ||
959 | |||
960 | } else if (i < d40d_prev->lli_len) { | ||
961 | (void) dma_unmap_single(d40c->base->dev, | ||
962 | virt_to_phys(d40d_prev->lli_phy.src), | ||
963 | d40d_prev->lli_pool.size, | ||
964 | DMA_TO_DEVICE); | ||
965 | 1026 | ||
966 | /* Keep the settings */ | 1027 | ret = d40_pause(chan); |
967 | val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & | 1028 | if (!ret && chan_is_physical(chan)) |
968 | ~D40_SREG_LNK_PHYS_LNK_MASK; | 1029 | ret = d40_channel_execute_command(chan, D40_DMA_STOP); |
969 | d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = | ||
970 | val | virt_to_phys(d40d->lli_phy.src); | ||
971 | 1030 | ||
972 | val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & | 1031 | spin_lock_irqsave(&chan->lock, flags); |
973 | ~D40_SREG_LNK_PHYS_LNK_MASK; | 1032 | d40_term_all(chan); |
974 | d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = | 1033 | spin_unlock_irqrestore(&chan->lock, flags); |
975 | val | virt_to_phys(d40d->lli_phy.dst); | ||
976 | 1034 | ||
977 | (void) dma_map_single(d40c->base->dev, | 1035 | return ret; |
978 | d40d_prev->lli_phy.src, | ||
979 | d40d_prev->lli_pool.size, | ||
980 | DMA_TO_DEVICE); | ||
981 | d40d->is_hw_linked = true; | ||
982 | } | ||
983 | } | 1036 | } |
984 | 1037 | ||
985 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 1038 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
990 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 1043 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
991 | unsigned long flags; | 1044 | unsigned long flags; |
992 | 1045 | ||
993 | (void) d40_pause(&d40c->chan); | ||
994 | |||
995 | spin_lock_irqsave(&d40c->lock, flags); | 1046 | spin_lock_irqsave(&d40c->lock, flags); |
996 | 1047 | ||
997 | d40c->chan.cookie++; | 1048 | d40c->chan.cookie++; |
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
1001 | 1052 | ||
1002 | d40d->txd.cookie = d40c->chan.cookie; | 1053 | d40d->txd.cookie = d40c->chan.cookie; |
1003 | 1054 | ||
1004 | if (d40c->log_num == D40_PHY_CHAN) | ||
1005 | d40_tx_submit_phy(d40c, d40d); | ||
1006 | else | ||
1007 | d40_tx_submit_log(d40c, d40d); | ||
1008 | |||
1009 | d40_desc_queue(d40c, d40d); | 1055 | d40_desc_queue(d40c, d40d); |
1010 | 1056 | ||
1011 | spin_unlock_irqrestore(&d40c->lock, flags); | 1057 | spin_unlock_irqrestore(&d40c->lock, flags); |
1012 | 1058 | ||
1013 | (void) d40_resume(&d40c->chan); | ||
1014 | |||
1015 | return tx->cookie; | 1059 | return tx->cookie; |
1016 | } | 1060 | } |
1017 | 1061 | ||
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c) | |||
1020 | if (d40c->base->rev == 0) { | 1064 | if (d40c->base->rev == 0) { |
1021 | int err; | 1065 | int err; |
1022 | 1066 | ||
1023 | if (d40c->log_num != D40_PHY_CHAN) { | 1067 | if (chan_is_logical(d40c)) { |
1024 | err = d40_channel_execute_command(d40c, | 1068 | err = d40_channel_execute_command(d40c, |
1025 | D40_DMA_SUSPEND_REQ); | 1069 | D40_DMA_SUSPEND_REQ); |
1026 | if (err) | 1070 | if (err) |
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c) | |||
1028 | } | 1072 | } |
1029 | } | 1073 | } |
1030 | 1074 | ||
1031 | if (d40c->log_num != D40_PHY_CHAN) | 1075 | if (chan_is_logical(d40c)) |
1032 | d40_config_set_event(d40c, true); | 1076 | d40_config_set_event(d40c, true); |
1033 | 1077 | ||
1034 | return d40_channel_execute_command(d40c, D40_DMA_RUN); | 1078 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
1051 | /* Add to active queue */ | 1095 | /* Add to active queue */ |
1052 | d40_desc_submit(d40c, d40d); | 1096 | d40_desc_submit(d40c, d40d); |
1053 | 1097 | ||
1054 | /* | 1098 | /* Initiate DMA job */ |
1055 | * If this job is already linked in hw, | 1099 | d40_desc_load(d40c, d40d); |
1056 | * do not submit it. | ||
1057 | */ | ||
1058 | |||
1059 | if (!d40d->is_hw_linked) { | ||
1060 | /* Initiate DMA job */ | ||
1061 | d40_desc_load(d40c, d40d); | ||
1062 | 1100 | ||
1063 | /* Start dma job */ | 1101 | /* Start dma job */ |
1064 | err = d40_start(d40c); | 1102 | err = d40_start(d40c); |
1065 | 1103 | ||
1066 | if (err) | 1104 | if (err) |
1067 | return NULL; | 1105 | return NULL; |
1068 | } | ||
1069 | } | 1106 | } |
1070 | 1107 | ||
1071 | return d40d; | 1108 | return d40d; |
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
1082 | if (d40d == NULL) | 1119 | if (d40d == NULL) |
1083 | return; | 1120 | return; |
1084 | 1121 | ||
1085 | d40_lcla_free_all(d40c, d40d); | 1122 | if (d40d->cyclic) { |
1123 | /* | ||
1124 | * If this was a partially loaded list, we need to reload | ||
1125 | * it, but only once the whole list has completed. We need to check | ||
1126 | * for done because the interrupt will hit for every link, and | ||
1127 | * not just the last one. | ||
1128 | */ | ||
1129 | if (d40d->lli_current < d40d->lli_len | ||
1130 | && !d40_tx_is_linked(d40c) | ||
1131 | && !d40_residue(d40c)) { | ||
1132 | d40_lcla_free_all(d40c, d40d); | ||
1133 | d40_desc_load(d40c, d40d); | ||
1134 | (void) d40_start(d40c); | ||
1086 | 1135 | ||
1087 | if (d40d->lli_current < d40d->lli_len) { | 1136 | if (d40d->lli_current == d40d->lli_len) |
1088 | d40_desc_load(d40c, d40d); | 1137 | d40d->lli_current = 0; |
1089 | /* Start dma job */ | 1138 | } |
1090 | (void) d40_start(d40c); | 1139 | } else { |
1091 | return; | 1140 | d40_lcla_free_all(d40c, d40d); |
1092 | } | ||
1093 | 1141 | ||
1094 | if (d40_queue_start(d40c) == NULL) | 1142 | if (d40d->lli_current < d40d->lli_len) { |
1095 | d40c->busy = false; | 1143 | d40_desc_load(d40c, d40d); |
1144 | /* Start dma job */ | ||
1145 | (void) d40_start(d40c); | ||
1146 | return; | ||
1147 | } | ||
1148 | |||
1149 | if (d40_queue_start(d40c) == NULL) | ||
1150 | d40c->busy = false; | ||
1151 | } | ||
1096 | 1152 | ||
1097 | d40c->pending_tx++; | 1153 | d40c->pending_tx++; |
1098 | tasklet_schedule(&d40c->tasklet); | 1154 | tasklet_schedule(&d40c->tasklet); |
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data) | |||
1111 | 1167 | ||
1112 | /* Get first active entry from list */ | 1168 | /* Get first active entry from list */ |
1113 | d40d = d40_first_active_get(d40c); | 1169 | d40d = d40_first_active_get(d40c); |
1114 | |||
1115 | if (d40d == NULL) | 1170 | if (d40d == NULL) |
1116 | goto err; | 1171 | goto err; |
1117 | 1172 | ||
1118 | d40c->completed = d40d->txd.cookie; | 1173 | if (!d40d->cyclic) |
1174 | d40c->completed = d40d->txd.cookie; | ||
1119 | 1175 | ||
1120 | /* | 1176 | /* |
1121 | * If terminating a channel pending_tx is set to zero. | 1177 | * If terminating a channel pending_tx is set to zero. |
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data) | |||
1130 | callback = d40d->txd.callback; | 1186 | callback = d40d->txd.callback; |
1131 | callback_param = d40d->txd.callback_param; | 1187 | callback_param = d40d->txd.callback_param; |
1132 | 1188 | ||
1133 | if (async_tx_test_ack(&d40d->txd)) { | 1189 | if (!d40d->cyclic) { |
1134 | d40_pool_lli_free(d40d); | 1190 | if (async_tx_test_ack(&d40d->txd)) { |
1135 | d40_desc_remove(d40d); | 1191 | d40_pool_lli_free(d40c, d40d); |
1136 | d40_desc_free(d40c, d40d); | ||
1137 | } else { | ||
1138 | if (!d40d->is_in_client_list) { | ||
1139 | d40_desc_remove(d40d); | 1192 | d40_desc_remove(d40d); |
1140 | d40_lcla_free_all(d40c, d40d); | 1193 | d40_desc_free(d40c, d40d); |
1141 | list_add_tail(&d40d->node, &d40c->client); | 1194 | } else { |
1142 | d40d->is_in_client_list = true; | 1195 | if (!d40d->is_in_client_list) { |
1196 | d40_desc_remove(d40d); | ||
1197 | d40_lcla_free_all(d40c, d40d); | ||
1198 | list_add_tail(&d40d->node, &d40c->client); | ||
1199 | d40d->is_in_client_list = true; | ||
1200 | } | ||
1143 | } | 1201 | } |
1144 | } | 1202 | } |
1145 | 1203 | ||
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
1216 | if (!il[row].is_error) | 1274 | if (!il[row].is_error) |
1217 | dma_tc_handle(d40c); | 1275 | dma_tc_handle(d40c); |
1218 | else | 1276 | else |
1219 | dev_err(base->dev, | 1277 | d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", |
1220 | "[%s] IRQ chan: %ld offset %d idx %d\n", | 1278 | chan, il[row].offset, idx); |
1221 | __func__, chan, il[row].offset, idx); | ||
1222 | 1279 | ||
1223 | spin_unlock(&d40c->lock); | 1280 | spin_unlock(&d40c->lock); |
1224 | } | 1281 | } |
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1237 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; | 1294 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
1238 | 1295 | ||
1239 | if (!conf->dir) { | 1296 | if (!conf->dir) { |
1240 | dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", | 1297 | chan_err(d40c, "Invalid direction.\n"); |
1241 | __func__); | ||
1242 | res = -EINVAL; | 1298 | res = -EINVAL; |
1243 | } | 1299 | } |
1244 | 1300 | ||
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1246 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && | 1302 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && |
1247 | d40c->runtime_addr == 0) { | 1303 | d40c->runtime_addr == 0) { |
1248 | 1304 | ||
1249 | dev_err(&d40c->chan.dev->device, | 1305 | chan_err(d40c, "Invalid TX channel address (%d)\n", |
1250 | "[%s] Invalid TX channel address (%d)\n", | 1306 | conf->dst_dev_type); |
1251 | __func__, conf->dst_dev_type); | ||
1252 | res = -EINVAL; | 1307 | res = -EINVAL; |
1253 | } | 1308 | } |
1254 | 1309 | ||
1255 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && | 1310 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && |
1256 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && | 1311 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && |
1257 | d40c->runtime_addr == 0) { | 1312 | d40c->runtime_addr == 0) { |
1258 | dev_err(&d40c->chan.dev->device, | 1313 | chan_err(d40c, "Invalid RX channel address (%d)\n", |
1259 | "[%s] Invalid RX channel address (%d)\n", | 1314 | conf->src_dev_type); |
1260 | __func__, conf->src_dev_type); | ||
1261 | res = -EINVAL; | 1315 | res = -EINVAL; |
1262 | } | 1316 | } |
1263 | 1317 | ||
1264 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && | 1318 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && |
1265 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | 1319 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { |
1266 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", | 1320 | chan_err(d40c, "Invalid dst\n"); |
1267 | __func__); | ||
1268 | res = -EINVAL; | 1321 | res = -EINVAL; |
1269 | } | 1322 | } |
1270 | 1323 | ||
1271 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && | 1324 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && |
1272 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | 1325 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { |
1273 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", | 1326 | chan_err(d40c, "Invalid src\n"); |
1274 | __func__); | ||
1275 | res = -EINVAL; | 1327 | res = -EINVAL; |
1276 | } | 1328 | } |
1277 | 1329 | ||
1278 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | 1330 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && |
1279 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | 1331 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { |
1280 | dev_err(&d40c->chan.dev->device, | 1332 | chan_err(d40c, "No event line\n"); |
1281 | "[%s] No event line\n", __func__); | ||
1282 | res = -EINVAL; | 1333 | res = -EINVAL; |
1283 | } | 1334 | } |
1284 | 1335 | ||
1285 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | 1336 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && |
1286 | (src_event_group != dst_event_group)) { | 1337 | (src_event_group != dst_event_group)) { |
1287 | dev_err(&d40c->chan.dev->device, | 1338 | chan_err(d40c, "Invalid event group\n"); |
1288 | "[%s] Invalid event group\n", __func__); | ||
1289 | res = -EINVAL; | 1339 | res = -EINVAL; |
1290 | } | 1340 | } |
1291 | 1341 | ||
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1294 | * DMAC HW supports it. Will be added to this driver, | 1344 | * DMAC HW supports it. Will be added to this driver, |
1295 | * in case any dma client requires it. | 1345 | * in case any dma client requires it. |
1296 | */ | 1346 | */ |
1297 | dev_err(&d40c->chan.dev->device, | 1347 | chan_err(d40c, "periph to periph not supported\n"); |
1298 | "[%s] periph to periph not supported\n", | ||
1299 | __func__); | ||
1300 | res = -EINVAL; | 1348 | res = -EINVAL; |
1301 | } | 1349 | } |
1302 | 1350 | ||
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1309 | * src (burst x width) == dst (burst x width) | 1357 | * src (burst x width) == dst (burst x width) |
1310 | */ | 1358 | */ |
1311 | 1359 | ||
1312 | dev_err(&d40c->chan.dev->device, | 1360 | chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); |
1313 | "[%s] src (burst x width) != dst (burst x width)\n", | ||
1314 | __func__); | ||
1315 | res = -EINVAL; | 1361 | res = -EINVAL; |
1316 | } | 1362 | } |
1317 | 1363 | ||
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) | |||
1514 | dma_has_cap(DMA_SLAVE, cap)) { | 1560 | dma_has_cap(DMA_SLAVE, cap)) { |
1515 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | 1561 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; |
1516 | } else { | 1562 | } else { |
1517 | dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", | 1563 | chan_err(d40c, "No memcpy\n"); |
1518 | __func__); | ||
1519 | return -EINVAL; | 1564 | return -EINVAL; |
1520 | } | 1565 | } |
1521 | 1566 | ||
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1540 | /* Release client owned descriptors */ | 1585 | /* Release client owned descriptors */ |
1541 | if (!list_empty(&d40c->client)) | 1586 | if (!list_empty(&d40c->client)) |
1542 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 1587 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
1543 | d40_pool_lli_free(d); | 1588 | d40_pool_lli_free(d40c, d); |
1544 | d40_desc_remove(d); | 1589 | d40_desc_remove(d); |
1545 | d40_desc_free(d40c, d); | 1590 | d40_desc_free(d40c, d); |
1546 | } | 1591 | } |
1547 | 1592 | ||
1548 | if (phy == NULL) { | 1593 | if (phy == NULL) { |
1549 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", | 1594 | chan_err(d40c, "phy == null\n"); |
1550 | __func__); | ||
1551 | return -EINVAL; | 1595 | return -EINVAL; |
1552 | } | 1596 | } |
1553 | 1597 | ||
1554 | if (phy->allocated_src == D40_ALLOC_FREE && | 1598 | if (phy->allocated_src == D40_ALLOC_FREE && |
1555 | phy->allocated_dst == D40_ALLOC_FREE) { | 1599 | phy->allocated_dst == D40_ALLOC_FREE) { |
1556 | dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", | 1600 | chan_err(d40c, "channel already free\n"); |
1557 | __func__); | ||
1558 | return -EINVAL; | 1601 | return -EINVAL; |
1559 | } | 1602 | } |
1560 | 1603 | ||
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1566 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1609 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1567 | is_src = true; | 1610 | is_src = true; |
1568 | } else { | 1611 | } else { |
1569 | dev_err(&d40c->chan.dev->device, | 1612 | chan_err(d40c, "Unknown direction\n"); |
1570 | "[%s] Unknown direction\n", __func__); | ||
1571 | return -EINVAL; | 1613 | return -EINVAL; |
1572 | } | 1614 | } |
1573 | 1615 | ||
1574 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1616 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1575 | if (res) { | 1617 | if (res) { |
1576 | dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", | 1618 | chan_err(d40c, "suspend failed\n"); |
1577 | __func__); | ||
1578 | return res; | 1619 | return res; |
1579 | } | 1620 | } |
1580 | 1621 | ||
1581 | if (d40c->log_num != D40_PHY_CHAN) { | 1622 | if (chan_is_logical(d40c)) { |
1582 | /* Release logical channel, deactivate the event line */ | 1623 | /* Release logical channel, deactivate the event line */ |
1583 | 1624 | ||
1584 | d40_config_set_event(d40c, false); | 1625 | d40_config_set_event(d40c, false); |
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1594 | res = d40_channel_execute_command(d40c, | 1635 | res = d40_channel_execute_command(d40c, |
1595 | D40_DMA_RUN); | 1636 | D40_DMA_RUN); |
1596 | if (res) { | 1637 | if (res) { |
1597 | dev_err(&d40c->chan.dev->device, | 1638 | chan_err(d40c, |
1598 | "[%s] Executing RUN command\n", | 1639 | "Executing RUN command\n"); |
1599 | __func__); | ||
1600 | return res; | 1640 | return res; |
1601 | } | 1641 | } |
1602 | } | 1642 | } |
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1609 | /* Release physical channel */ | 1649 | /* Release physical channel */ |
1610 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1650 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
1611 | if (res) { | 1651 | if (res) { |
1612 | dev_err(&d40c->chan.dev->device, | 1652 | chan_err(d40c, "Failed to stop channel\n"); |
1613 | "[%s] Failed to stop channel\n", __func__); | ||
1614 | return res; | 1653 | return res; |
1615 | } | 1654 | } |
1616 | d40c->phy_chan = NULL; | 1655 | d40c->phy_chan = NULL; |
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1622 | 1661 | ||
1623 | static bool d40_is_paused(struct d40_chan *d40c) | 1662 | static bool d40_is_paused(struct d40_chan *d40c) |
1624 | { | 1663 | { |
1664 | void __iomem *chanbase = chan_base(d40c); | ||
1625 | bool is_paused = false; | 1665 | bool is_paused = false; |
1626 | unsigned long flags; | 1666 | unsigned long flags; |
1627 | void __iomem *active_reg; | 1667 | void __iomem *active_reg; |
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1630 | 1670 | ||
1631 | spin_lock_irqsave(&d40c->lock, flags); | 1671 | spin_lock_irqsave(&d40c->lock, flags); |
1632 | 1672 | ||
1633 | if (d40c->log_num == D40_PHY_CHAN) { | 1673 | if (chan_is_physical(d40c)) { |
1634 | if (d40c->phy_chan->num % 2 == 0) | 1674 | if (d40c->phy_chan->num % 2 == 0) |
1635 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 1675 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
1636 | else | 1676 | else |
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1648 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1688 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1649 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1689 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1650 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1690 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1651 | status = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1691 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
1652 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
1653 | D40_CHAN_REG_SDLNK); | ||
1654 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1692 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1655 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1693 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1656 | status = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1694 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
1657 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
1658 | D40_CHAN_REG_SSLNK); | ||
1659 | } else { | 1695 | } else { |
1660 | dev_err(&d40c->chan.dev->device, | 1696 | chan_err(d40c, "Unknown direction\n"); |
1661 | "[%s] Unknown direction\n", __func__); | ||
1662 | goto _exit; | 1697 | goto _exit; |
1663 | } | 1698 | } |
1664 | 1699 | ||
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan) | |||
1688 | return bytes_left; | 1723 | return bytes_left; |
1689 | } | 1724 | } |
1690 | 1725 | ||
1691 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | 1726 | static int |
1692 | struct scatterlist *sgl_dst, | 1727 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, |
1693 | struct scatterlist *sgl_src, | 1728 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
1694 | unsigned int sgl_len, | 1729 | unsigned int sg_len, dma_addr_t src_dev_addr, |
1695 | unsigned long dma_flags) | 1730 | dma_addr_t dst_dev_addr) |
1696 | { | 1731 | { |
1697 | int res; | 1732 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1698 | struct d40_desc *d40d; | 1733 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
1699 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1734 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
1700 | chan); | 1735 | int ret; |
1701 | unsigned long flags; | ||
1702 | 1736 | ||
1703 | if (d40c->phy_chan == NULL) { | 1737 | ret = d40_log_sg_to_lli(sg_src, sg_len, |
1704 | dev_err(&d40c->chan.dev->device, | 1738 | src_dev_addr, |
1705 | "[%s] Unallocated channel.\n", __func__); | 1739 | desc->lli_log.src, |
1706 | return ERR_PTR(-EINVAL); | 1740 | chan->log_def.lcsp1, |
1707 | } | 1741 | src_info->data_width, |
1742 | dst_info->data_width); | ||
1708 | 1743 | ||
1709 | spin_lock_irqsave(&d40c->lock, flags); | 1744 | ret = d40_log_sg_to_lli(sg_dst, sg_len, |
1710 | d40d = d40_desc_get(d40c); | 1745 | dst_dev_addr, |
1746 | desc->lli_log.dst, | ||
1747 | chan->log_def.lcsp3, | ||
1748 | dst_info->data_width, | ||
1749 | src_info->data_width); | ||
1711 | 1750 | ||
1712 | if (d40d == NULL) | 1751 | return ret < 0 ? ret : 0; |
1752 | } | ||
1753 | |||
1754 | static int | ||
1755 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | ||
1756 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | ||
1757 | unsigned int sg_len, dma_addr_t src_dev_addr, | ||
1758 | dma_addr_t dst_dev_addr) | ||
1759 | { | ||
1760 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
1761 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | ||
1762 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | ||
1763 | unsigned long flags = 0; | ||
1764 | int ret; | ||
1765 | |||
1766 | if (desc->cyclic) | ||
1767 | flags |= LLI_CYCLIC | LLI_TERM_INT; | ||
1768 | |||
1769 | ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, | ||
1770 | desc->lli_phy.src, | ||
1771 | virt_to_phys(desc->lli_phy.src), | ||
1772 | chan->src_def_cfg, | ||
1773 | src_info, dst_info, flags); | ||
1774 | |||
1775 | ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, | ||
1776 | desc->lli_phy.dst, | ||
1777 | virt_to_phys(desc->lli_phy.dst), | ||
1778 | chan->dst_def_cfg, | ||
1779 | dst_info, src_info, flags); | ||
1780 | |||
1781 | dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, | ||
1782 | desc->lli_pool.size, DMA_TO_DEVICE); | ||
1783 | |||
1784 | return ret < 0 ? ret : 0; | ||
1785 | } | ||
1786 | |||
1787 | |||
1788 | static struct d40_desc * | ||
1789 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | ||
1790 | unsigned int sg_len, unsigned long dma_flags) | ||
1791 | { | ||
1792 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
1793 | struct d40_desc *desc; | ||
1794 | int ret; | ||
1795 | |||
1796 | desc = d40_desc_get(chan); | ||
1797 | if (!desc) | ||
1798 | return NULL; | ||
1799 | |||
1800 | desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, | ||
1801 | cfg->dst_info.data_width); | ||
1802 | if (desc->lli_len < 0) { | ||
1803 | chan_err(chan, "Unaligned size\n"); | ||
1713 | goto err; | 1804 | goto err; |
1805 | } | ||
1714 | 1806 | ||
1715 | d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, | 1807 | ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); |
1716 | d40c->dma_cfg.src_info.data_width, | 1808 | if (ret < 0) { |
1717 | d40c->dma_cfg.dst_info.data_width); | 1809 | chan_err(chan, "Could not allocate lli\n"); |
1718 | if (d40d->lli_len < 0) { | ||
1719 | dev_err(&d40c->chan.dev->device, | ||
1720 | "[%s] Unaligned size\n", __func__); | ||
1721 | goto err; | 1810 | goto err; |
1722 | } | 1811 | } |
1723 | 1812 | ||
1724 | d40d->lli_current = 0; | ||
1725 | d40d->txd.flags = dma_flags; | ||
1726 | 1813 | ||
1727 | if (d40c->log_num != D40_PHY_CHAN) { | 1814 | desc->lli_current = 0; |
1815 | desc->txd.flags = dma_flags; | ||
1816 | desc->txd.tx_submit = d40_tx_submit; | ||
1728 | 1817 | ||
1729 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { | 1818 | dma_async_tx_descriptor_init(&desc->txd, &chan->chan); |
1730 | dev_err(&d40c->chan.dev->device, | ||
1731 | "[%s] Out of memory\n", __func__); | ||
1732 | goto err; | ||
1733 | } | ||
1734 | 1819 | ||
1735 | (void) d40_log_sg_to_lli(sgl_src, | 1820 | return desc; |
1736 | sgl_len, | 1821 | |
1737 | d40d->lli_log.src, | 1822 | err: |
1738 | d40c->log_def.lcsp1, | 1823 | d40_desc_free(chan, desc); |
1739 | d40c->dma_cfg.src_info.data_width, | 1824 | return NULL; |
1740 | d40c->dma_cfg.dst_info.data_width); | 1825 | } |
1741 | 1826 | ||
1742 | (void) d40_log_sg_to_lli(sgl_dst, | 1827 | static dma_addr_t |
1743 | sgl_len, | 1828 | d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) |
1744 | d40d->lli_log.dst, | 1829 | { |
1745 | d40c->log_def.lcsp3, | 1830 | struct stedma40_platform_data *plat = chan->base->plat_data; |
1746 | d40c->dma_cfg.dst_info.data_width, | 1831 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1747 | d40c->dma_cfg.src_info.data_width); | 1832 | dma_addr_t addr; |
1748 | } else { | ||
1749 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { | ||
1750 | dev_err(&d40c->chan.dev->device, | ||
1751 | "[%s] Out of memory\n", __func__); | ||
1752 | goto err; | ||
1753 | } | ||
1754 | 1833 | ||
1755 | res = d40_phy_sg_to_lli(sgl_src, | 1834 | if (chan->runtime_addr) |
1756 | sgl_len, | 1835 | return chan->runtime_addr; |
1757 | 0, | ||
1758 | d40d->lli_phy.src, | ||
1759 | virt_to_phys(d40d->lli_phy.src), | ||
1760 | d40c->src_def_cfg, | ||
1761 | d40c->dma_cfg.src_info.data_width, | ||
1762 | d40c->dma_cfg.dst_info.data_width, | ||
1763 | d40c->dma_cfg.src_info.psize); | ||
1764 | 1836 | ||
1765 | if (res < 0) | 1837 | if (direction == DMA_FROM_DEVICE) |
1766 | goto err; | 1838 | addr = plat->dev_rx[cfg->src_dev_type]; |
1839 | else if (direction == DMA_TO_DEVICE) | ||
1840 | addr = plat->dev_tx[cfg->dst_dev_type]; | ||
1767 | 1841 | ||
1768 | res = d40_phy_sg_to_lli(sgl_dst, | 1842 | return addr; |
1769 | sgl_len, | 1843 | } |
1770 | 0, | ||
1771 | d40d->lli_phy.dst, | ||
1772 | virt_to_phys(d40d->lli_phy.dst), | ||
1773 | d40c->dst_def_cfg, | ||
1774 | d40c->dma_cfg.dst_info.data_width, | ||
1775 | d40c->dma_cfg.src_info.data_width, | ||
1776 | d40c->dma_cfg.dst_info.psize); | ||
1777 | 1844 | ||
1778 | if (res < 0) | 1845 | static struct dma_async_tx_descriptor * |
1779 | goto err; | 1846 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
1847 | struct scatterlist *sg_dst, unsigned int sg_len, | ||
1848 | enum dma_data_direction direction, unsigned long dma_flags) | ||
1849 | { | ||
1850 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | ||
1851 | dma_addr_t src_dev_addr = 0; | ||
1852 | dma_addr_t dst_dev_addr = 0; | ||
1853 | struct d40_desc *desc; | ||
1854 | unsigned long flags; | ||
1855 | int ret; | ||
1780 | 1856 | ||
1781 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 1857 | if (!chan->phy_chan) { |
1782 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1858 | chan_err(chan, "Cannot prepare unallocated channel\n"); |
1859 | return NULL; | ||
1783 | } | 1860 | } |
1784 | 1861 | ||
1785 | dma_async_tx_descriptor_init(&d40d->txd, chan); | ||
1786 | 1862 | ||
1787 | d40d->txd.tx_submit = d40_tx_submit; | 1863 | spin_lock_irqsave(&chan->lock, flags); |
1788 | 1864 | ||
1789 | spin_unlock_irqrestore(&d40c->lock, flags); | 1865 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
1866 | if (desc == NULL) | ||
1867 | goto err; | ||
1868 | |||
1869 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) | ||
1870 | desc->cyclic = true; | ||
1871 | |||
1872 | if (direction != DMA_NONE) { | ||
1873 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | ||
1874 | |||
1875 | if (direction == DMA_FROM_DEVICE) | ||
1876 | src_dev_addr = dev_addr; | ||
1877 | else if (direction == DMA_TO_DEVICE) | ||
1878 | dst_dev_addr = dev_addr; | ||
1879 | } | ||
1880 | |||
1881 | if (chan_is_logical(chan)) | ||
1882 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | ||
1883 | sg_len, src_dev_addr, dst_dev_addr); | ||
1884 | else | ||
1885 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, | ||
1886 | sg_len, src_dev_addr, dst_dev_addr); | ||
1887 | |||
1888 | if (ret) { | ||
1889 | chan_err(chan, "Failed to prepare %s sg job: %d\n", | ||
1890 | chan_is_logical(chan) ? "log" : "phy", ret); | ||
1891 | goto err; | ||
1892 | } | ||
1893 | |||
1894 | spin_unlock_irqrestore(&chan->lock, flags); | ||
1895 | |||
1896 | return &desc->txd; | ||
1790 | 1897 | ||
1791 | return &d40d->txd; | ||
1792 | err: | 1898 | err: |
1793 | if (d40d) | 1899 | if (desc) |
1794 | d40_desc_free(d40c, d40d); | 1900 | d40_desc_free(chan, desc); |
1795 | spin_unlock_irqrestore(&d40c->lock, flags); | 1901 | spin_unlock_irqrestore(&chan->lock, flags); |
1796 | return NULL; | 1902 | return NULL; |
1797 | } | 1903 | } |
1798 | EXPORT_SYMBOL(stedma40_memcpy_sg); | ||
1799 | 1904 | ||
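With d40_prep_sg() as the common preparation path (d40_prep_memcpy() further down, for instance, is reworked to build single-entry scatterlists), clients keep using the standard dmaengine flow: prepare a descriptor, set the callback, submit, then issue pending transfers. A sketch of that client-side flow for one RX buffer; the channel is assumed to be already configured, and the names and error handling are placeholders rather than anything taken from this driver:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/errno.h>

	/* Queue one receive buffer on an already configured DMA channel. */
	static int queue_rx_buffer(struct dma_chan *chan, void *buf, size_t len,
				   dma_async_tx_callback done, void *done_arg)
	{
		struct dma_async_tx_descriptor *txd;
		struct scatterlist sg;

		sg_init_one(&sg, buf, len);
		if (dma_map_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE) != 1)
			return -ENOMEM;

		txd = chan->device->device_prep_slave_sg(chan, &sg, 1,
							 DMA_FROM_DEVICE,
							 DMA_PREP_INTERRUPT);
		if (!txd) {
			dma_unmap_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE);
			return -EBUSY;
		}

		txd->callback = done;
		txd->callback_param = done_arg;
		txd->tx_submit(txd);

		dma_async_issue_pending(chan);

		return 0;
	}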
1800 | bool stedma40_filter(struct dma_chan *chan, void *data) | 1905 | bool stedma40_filter(struct dma_chan *chan, void *data) |
1801 | { | 1906 | { |
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data) | |||
1818 | } | 1923 | } |
1819 | EXPORT_SYMBOL(stedma40_filter); | 1924 | EXPORT_SYMBOL(stedma40_filter); |
1820 | 1925 | ||
1926 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) | ||
1927 | { | ||
1928 | bool realtime = d40c->dma_cfg.realtime; | ||
1929 | bool highprio = d40c->dma_cfg.high_priority; | ||
1930 | u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | ||
1931 | u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | ||
1932 | u32 event = D40_TYPE_TO_EVENT(dev_type); | ||
1933 | u32 group = D40_TYPE_TO_GROUP(dev_type); | ||
1934 | u32 bit = 1 << event; | ||
1935 | |||
1936 | /* Destination event lines are stored in the upper halfword */ | ||
1937 | if (!src) | ||
1938 | bit <<= 16; | ||
1939 | |||
1940 | writel(bit, d40c->base->virtbase + prioreg + group * 4); | ||
1941 | writel(bit, d40c->base->virtbase + rtreg + group * 4); | ||
1942 | } | ||
1943 | |||
1944 | static void d40_set_prio_realtime(struct d40_chan *d40c) | ||
1945 | { | ||
1946 | if (d40c->base->rev < 3) | ||
1947 | return; | ||
1948 | |||
1949 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | ||
1950 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
1951 | __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); | ||
1952 | |||
1953 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || | ||
1954 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
1955 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); | ||
1956 | } | ||
1957 | |||
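d40_set_prio_realtime() above only takes effect on DMA40 rev 3 or later and is driven purely by the realtime and high_priority flags the client put into its channel configuration. A sketch of how a client might request such a channel through the usual filter-function path; the source device type is a placeholder, and it assumes the stedma40_chan_cfg is passed as the filter parameter, as this driver's clients do:

	#include <linux/dmaengine.h>
	#include <plat/ste_dma40.h>

	static struct dma_chan *request_rt_rx_channel(int src_dev_type)
	{
		struct stedma40_chan_cfg cfg = {
			.dir		= STEDMA40_PERIPH_TO_MEM,
			.mode		= STEDMA40_MODE_LOGICAL,
			.high_priority	= true,
			.realtime	= true,	/* honoured on DMA40 v3+ only */
			.src_dev_type	= src_dev_type,
			.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* The filter copies the config into the channel if it matches. */
		return dma_request_channel(mask, stedma40_filter, &cfg);
	}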
1821 | /* DMA ENGINE functions */ | 1958 | /* DMA ENGINE functions */ |
1822 | static int d40_alloc_chan_resources(struct dma_chan *chan) | 1959 | static int d40_alloc_chan_resources(struct dma_chan *chan) |
1823 | { | 1960 | { |
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1834 | if (!d40c->configured) { | 1971 | if (!d40c->configured) { |
1835 | err = d40_config_memcpy(d40c); | 1972 | err = d40_config_memcpy(d40c); |
1836 | if (err) { | 1973 | if (err) { |
1837 | dev_err(&d40c->chan.dev->device, | 1974 | chan_err(d40c, "Failed to configure memcpy channel\n"); |
1838 | "[%s] Failed to configure memcpy channel\n", | ||
1839 | __func__); | ||
1840 | goto fail; | 1975 | goto fail; |
1841 | } | 1976 | } |
1842 | } | 1977 | } |
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1844 | 1979 | ||
1845 | err = d40_allocate_channel(d40c); | 1980 | err = d40_allocate_channel(d40c); |
1846 | if (err) { | 1981 | if (err) { |
1847 | dev_err(&d40c->chan.dev->device, | 1982 | chan_err(d40c, "Failed to allocate channel\n"); |
1848 | "[%s] Failed to allocate channel\n", __func__); | ||
1849 | goto fail; | 1983 | goto fail; |
1850 | } | 1984 | } |
1851 | 1985 | ||
1852 | /* Fill in basic CFG register values */ | 1986 | /* Fill in basic CFG register values */ |
1853 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 1987 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
1854 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); | 1988 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
1855 | 1989 | ||
1856 | if (d40c->log_num != D40_PHY_CHAN) { | 1990 | d40_set_prio_realtime(d40c); |
1991 | |||
1992 | if (chan_is_logical(d40c)) { | ||
1857 | d40_log_cfg(&d40c->dma_cfg, | 1993 | d40_log_cfg(&d40c->dma_cfg, |
1858 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 1994 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
1859 | 1995 | ||
@@ -1886,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1886 | unsigned long flags; | 2022 | unsigned long flags; |
1887 | 2023 | ||
1888 | if (d40c->phy_chan == NULL) { | 2024 | if (d40c->phy_chan == NULL) { |
1889 | dev_err(&d40c->chan.dev->device, | 2025 | chan_err(d40c, "Cannot free unallocated channel\n"); |
1890 | "[%s] Cannot free unallocated channel\n", __func__); | ||
1891 | return; | 2026 | return; |
1892 | } | 2027 | } |
1893 | 2028 | ||
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1897 | err = d40_free_dma(d40c); | 2032 | err = d40_free_dma(d40c); |
1898 | 2033 | ||
1899 | if (err) | 2034 | if (err) |
1900 | dev_err(&d40c->chan.dev->device, | 2035 | chan_err(d40c, "Failed to free channel\n"); |
1901 | "[%s] Failed to free channel\n", __func__); | ||
1902 | spin_unlock_irqrestore(&d40c->lock, flags); | 2036 | spin_unlock_irqrestore(&d40c->lock, flags); |
1903 | } | 2037 | } |
1904 | 2038 | ||
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1908 | size_t size, | 2042 | size_t size, |
1909 | unsigned long dma_flags) | 2043 | unsigned long dma_flags) |
1910 | { | 2044 | { |
1911 | struct d40_desc *d40d; | 2045 | struct scatterlist dst_sg; |
1912 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 2046 | struct scatterlist src_sg; |
1913 | chan); | ||
1914 | unsigned long flags; | ||
1915 | |||
1916 | if (d40c->phy_chan == NULL) { | ||
1917 | dev_err(&d40c->chan.dev->device, | ||
1918 | "[%s] Channel is not allocated.\n", __func__); | ||
1919 | return ERR_PTR(-EINVAL); | ||
1920 | } | ||
1921 | |||
1922 | spin_lock_irqsave(&d40c->lock, flags); | ||
1923 | d40d = d40_desc_get(d40c); | ||
1924 | |||
1925 | if (d40d == NULL) { | ||
1926 | dev_err(&d40c->chan.dev->device, | ||
1927 | "[%s] Descriptor is NULL\n", __func__); | ||
1928 | goto err; | ||
1929 | } | ||
1930 | 2047 | ||
1931 | d40d->txd.flags = dma_flags; | 2048 | sg_init_table(&dst_sg, 1); |
1932 | d40d->lli_len = d40_size_2_dmalen(size, | 2049 | sg_init_table(&src_sg, 1); |
1933 | d40c->dma_cfg.src_info.data_width, | ||
1934 | d40c->dma_cfg.dst_info.data_width); | ||
1935 | if (d40d->lli_len < 0) { | ||
1936 | dev_err(&d40c->chan.dev->device, | ||
1937 | "[%s] Unaligned size\n", __func__); | ||
1938 | goto err; | ||
1939 | } | ||
1940 | 2050 | ||
2051 | sg_dma_address(&dst_sg) = dst; | ||
2052 | sg_dma_address(&src_sg) = src; | ||
1941 | 2053 | ||
1942 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 2054 | sg_dma_len(&dst_sg) = size; |
2055 | sg_dma_len(&src_sg) = size; | ||
1943 | 2056 | ||
1944 | d40d->txd.tx_submit = d40_tx_submit; | 2057 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); |
1945 | |||
1946 | if (d40c->log_num != D40_PHY_CHAN) { | ||
1947 | |||
1948 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { | ||
1949 | dev_err(&d40c->chan.dev->device, | ||
1950 | "[%s] Out of memory\n", __func__); | ||
1951 | goto err; | ||
1952 | } | ||
1953 | d40d->lli_current = 0; | ||
1954 | |||
1955 | if (d40_log_buf_to_lli(d40d->lli_log.src, | ||
1956 | src, | ||
1957 | size, | ||
1958 | d40c->log_def.lcsp1, | ||
1959 | d40c->dma_cfg.src_info.data_width, | ||
1960 | d40c->dma_cfg.dst_info.data_width, | ||
1961 | true) == NULL) | ||
1962 | goto err; | ||
1963 | |||
1964 | if (d40_log_buf_to_lli(d40d->lli_log.dst, | ||
1965 | dst, | ||
1966 | size, | ||
1967 | d40c->log_def.lcsp3, | ||
1968 | d40c->dma_cfg.dst_info.data_width, | ||
1969 | d40c->dma_cfg.src_info.data_width, | ||
1970 | true) == NULL) | ||
1971 | goto err; | ||
1972 | |||
1973 | } else { | ||
1974 | |||
1975 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { | ||
1976 | dev_err(&d40c->chan.dev->device, | ||
1977 | "[%s] Out of memory\n", __func__); | ||
1978 | goto err; | ||
1979 | } | ||
1980 | |||
1981 | if (d40_phy_buf_to_lli(d40d->lli_phy.src, | ||
1982 | src, | ||
1983 | size, | ||
1984 | d40c->dma_cfg.src_info.psize, | ||
1985 | 0, | ||
1986 | d40c->src_def_cfg, | ||
1987 | true, | ||
1988 | d40c->dma_cfg.src_info.data_width, | ||
1989 | d40c->dma_cfg.dst_info.data_width, | ||
1990 | false) == NULL) | ||
1991 | goto err; | ||
1992 | |||
1993 | if (d40_phy_buf_to_lli(d40d->lli_phy.dst, | ||
1994 | dst, | ||
1995 | size, | ||
1996 | d40c->dma_cfg.dst_info.psize, | ||
1997 | 0, | ||
1998 | d40c->dst_def_cfg, | ||
1999 | true, | ||
2000 | d40c->dma_cfg.dst_info.data_width, | ||
2001 | d40c->dma_cfg.src_info.data_width, | ||
2002 | false) == NULL) | ||
2003 | goto err; | ||
2004 | |||
2005 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | ||
2006 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
2007 | } | ||
2008 | |||
2009 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2010 | return &d40d->txd; | ||
2011 | |||
2012 | err: | ||
2013 | if (d40d) | ||
2014 | d40_desc_free(d40c, d40d); | ||
2015 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2016 | return NULL; | ||
2017 | } | 2058 | } |
2018 | 2059 | ||
2019 | static struct dma_async_tx_descriptor * | 2060 | static struct dma_async_tx_descriptor * |
2020 | d40_prep_sg(struct dma_chan *chan, | 2061 | d40_prep_memcpy_sg(struct dma_chan *chan, |
2021 | struct scatterlist *dst_sg, unsigned int dst_nents, | 2062 | struct scatterlist *dst_sg, unsigned int dst_nents, |
2022 | struct scatterlist *src_sg, unsigned int src_nents, | 2063 | struct scatterlist *src_sg, unsigned int src_nents, |
2023 | unsigned long dma_flags) | 2064 | unsigned long dma_flags) |
2024 | { | 2065 | { |
2025 | if (dst_nents != src_nents) | 2066 | if (dst_nents != src_nents) |
2026 | return NULL; | 2067 | return NULL; |
2027 | 2068 | ||
2028 | return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); | 2069 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); |
2029 | } | ||
2030 | |||
2031 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | ||
2032 | struct d40_chan *d40c, | ||
2033 | struct scatterlist *sgl, | ||
2034 | unsigned int sg_len, | ||
2035 | enum dma_data_direction direction, | ||
2036 | unsigned long dma_flags) | ||
2037 | { | ||
2038 | dma_addr_t dev_addr = 0; | ||
2039 | int total_size; | ||
2040 | |||
2041 | d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len, | ||
2042 | d40c->dma_cfg.src_info.data_width, | ||
2043 | d40c->dma_cfg.dst_info.data_width); | ||
2044 | if (d40d->lli_len < 0) { | ||
2045 | dev_err(&d40c->chan.dev->device, | ||
2046 | "[%s] Unaligned size\n", __func__); | ||
2047 | return -EINVAL; | ||
2048 | } | ||
2049 | |||
2050 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { | ||
2051 | dev_err(&d40c->chan.dev->device, | ||
2052 | "[%s] Out of memory\n", __func__); | ||
2053 | return -ENOMEM; | ||
2054 | } | ||
2055 | |||
2056 | d40d->lli_current = 0; | ||
2057 | |||
2058 | if (direction == DMA_FROM_DEVICE) | ||
2059 | if (d40c->runtime_addr) | ||
2060 | dev_addr = d40c->runtime_addr; | ||
2061 | else | ||
2062 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
2063 | else if (direction == DMA_TO_DEVICE) | ||
2064 | if (d40c->runtime_addr) | ||
2065 | dev_addr = d40c->runtime_addr; | ||
2066 | else | ||
2067 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
2068 | |||
2069 | else | ||
2070 | return -EINVAL; | ||
2071 | |||
2072 | total_size = d40_log_sg_to_dev(sgl, sg_len, | ||
2073 | &d40d->lli_log, | ||
2074 | &d40c->log_def, | ||
2075 | d40c->dma_cfg.src_info.data_width, | ||
2076 | d40c->dma_cfg.dst_info.data_width, | ||
2077 | direction, | ||
2078 | dev_addr); | ||
2079 | |||
2080 | if (total_size < 0) | ||
2081 | return -EINVAL; | ||
2082 | |||
2083 | return 0; | ||
2084 | } | ||
2085 | |||
2086 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | ||
2087 | struct d40_chan *d40c, | ||
2088 | struct scatterlist *sgl, | ||
2089 | unsigned int sgl_len, | ||
2090 | enum dma_data_direction direction, | ||
2091 | unsigned long dma_flags) | ||
2092 | { | ||
2093 | dma_addr_t src_dev_addr; | ||
2094 | dma_addr_t dst_dev_addr; | ||
2095 | int res; | ||
2096 | |||
2097 | d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len, | ||
2098 | d40c->dma_cfg.src_info.data_width, | ||
2099 | d40c->dma_cfg.dst_info.data_width); | ||
2100 | if (d40d->lli_len < 0) { | ||
2101 | dev_err(&d40c->chan.dev->device, | ||
2102 | "[%s] Unaligned size\n", __func__); | ||
2103 | return -EINVAL; | ||
2104 | } | ||
2105 | |||
2106 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { | ||
2107 | dev_err(&d40c->chan.dev->device, | ||
2108 | "[%s] Out of memory\n", __func__); | ||
2109 | return -ENOMEM; | ||
2110 | } | ||
2111 | |||
2112 | d40d->lli_current = 0; | ||
2113 | |||
2114 | if (direction == DMA_FROM_DEVICE) { | ||
2115 | dst_dev_addr = 0; | ||
2116 | if (d40c->runtime_addr) | ||
2117 | src_dev_addr = d40c->runtime_addr; | ||
2118 | else | ||
2119 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
2120 | } else if (direction == DMA_TO_DEVICE) { | ||
2121 | if (d40c->runtime_addr) | ||
2122 | dst_dev_addr = d40c->runtime_addr; | ||
2123 | else | ||
2124 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
2125 | src_dev_addr = 0; | ||
2126 | } else | ||
2127 | return -EINVAL; | ||
2128 | |||
2129 | res = d40_phy_sg_to_lli(sgl, | ||
2130 | sgl_len, | ||
2131 | src_dev_addr, | ||
2132 | d40d->lli_phy.src, | ||
2133 | virt_to_phys(d40d->lli_phy.src), | ||
2134 | d40c->src_def_cfg, | ||
2135 | d40c->dma_cfg.src_info.data_width, | ||
2136 | d40c->dma_cfg.dst_info.data_width, | ||
2137 | d40c->dma_cfg.src_info.psize); | ||
2138 | if (res < 0) | ||
2139 | return res; | ||
2140 | |||
2141 | res = d40_phy_sg_to_lli(sgl, | ||
2142 | sgl_len, | ||
2143 | dst_dev_addr, | ||
2144 | d40d->lli_phy.dst, | ||
2145 | virt_to_phys(d40d->lli_phy.dst), | ||
2146 | d40c->dst_def_cfg, | ||
2147 | d40c->dma_cfg.dst_info.data_width, | ||
2148 | d40c->dma_cfg.src_info.data_width, | ||
2149 | d40c->dma_cfg.dst_info.psize); | ||
2150 | if (res < 0) | ||
2151 | return res; | ||
2152 | |||
2153 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | ||
2154 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
2155 | return 0; | ||
2156 | } | 2070 | } |
2157 | 2071 | ||
2158 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2072 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2161 | enum dma_data_direction direction, | 2075 | enum dma_data_direction direction, |
2162 | unsigned long dma_flags) | 2076 | unsigned long dma_flags) |
2163 | { | 2077 | { |
2164 | struct d40_desc *d40d; | 2078 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) |
2165 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 2079 | return NULL; |
2166 | chan); | ||
2167 | unsigned long flags; | ||
2168 | int err; | ||
2169 | |||
2170 | if (d40c->phy_chan == NULL) { | ||
2171 | dev_err(&d40c->chan.dev->device, | ||
2172 | "[%s] Cannot prepare unallocated channel\n", __func__); | ||
2173 | return ERR_PTR(-EINVAL); | ||
2174 | } | ||
2175 | 2080 | ||
2176 | spin_lock_irqsave(&d40c->lock, flags); | 2081 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
2177 | d40d = d40_desc_get(d40c); | 2082 | } |
2178 | 2083 | ||
2179 | if (d40d == NULL) | 2084 | static struct dma_async_tx_descriptor * |
2180 | goto err; | 2085 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2086 | size_t buf_len, size_t period_len, | ||
2087 | enum dma_data_direction direction) | ||
2088 | { | ||
2089 | unsigned int periods = buf_len / period_len; | ||
2090 | struct dma_async_tx_descriptor *txd; | ||
2091 | struct scatterlist *sg; | ||
2092 | int i; | ||
2181 | 2093 | ||
2182 | if (d40c->log_num != D40_PHY_CHAN) | 2094 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); |
2183 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 2095 | for (i = 0; i < periods; i++) { |
2184 | direction, dma_flags); | 2096 | sg_dma_address(&sg[i]) = dma_addr; |
2185 | else | 2097 | sg_dma_len(&sg[i]) = period_len; |
2186 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | 2098 | dma_addr += period_len; |
2187 | direction, dma_flags); | ||
2188 | if (err) { | ||
2189 | dev_err(&d40c->chan.dev->device, | ||
2190 | "[%s] Failed to prepare %s slave sg job: %d\n", | ||
2191 | __func__, | ||
2192 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | ||
2193 | goto err; | ||
2194 | } | 2099 | } |
2195 | 2100 | ||
2196 | d40d->txd.flags = dma_flags; | 2101 | sg[periods].offset = 0; |
2102 | sg[periods].length = 0; | ||
2103 | sg[periods].page_link = | ||
2104 | ((unsigned long)sg | 0x01) & ~0x02; | ||
2197 | 2105 | ||
2198 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 2106 | txd = d40_prep_sg(chan, sg, sg, periods, direction, |
2107 | DMA_PREP_INTERRUPT); | ||
2199 | 2108 | ||
2200 | d40d->txd.tx_submit = d40_tx_submit; | 2109 | kfree(sg); |
2201 | 2110 | ||
2202 | spin_unlock_irqrestore(&d40c->lock, flags); | 2111 | return txd; |
2203 | return &d40d->txd; | ||
2204 | |||
2205 | err: | ||
2206 | if (d40d) | ||
2207 | d40_desc_free(d40c, d40d); | ||
2208 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2209 | return NULL; | ||
2210 | } | 2112 | } |
2211 | 2113 | ||
2212 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 2114 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2219 | int ret; | 2121 | int ret; |
2220 | 2122 | ||
2221 | if (d40c->phy_chan == NULL) { | 2123 | if (d40c->phy_chan == NULL) { |
2222 | dev_err(&d40c->chan.dev->device, | 2124 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
2223 | "[%s] Cannot read status of unallocated channel\n", | ||
2224 | __func__); | ||
2225 | return -EINVAL; | 2125 | return -EINVAL; |
2226 | } | 2126 | } |
2227 | 2127 | ||
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2245 | unsigned long flags; | 2145 | unsigned long flags; |
2246 | 2146 | ||
2247 | if (d40c->phy_chan == NULL) { | 2147 | if (d40c->phy_chan == NULL) { |
2248 | dev_err(&d40c->chan.dev->device, | 2148 | chan_err(d40c, "Channel is not allocated!\n"); |
2249 | "[%s] Channel is not allocated!\n", __func__); | ||
2250 | return; | 2149 | return; |
2251 | } | 2150 | } |
2252 | 2151 | ||
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2339 | return; | 2238 | return; |
2340 | } | 2239 | } |
2341 | 2240 | ||
2342 | if (d40c->log_num != D40_PHY_CHAN) { | 2241 | if (chan_is_logical(d40c)) { |
2343 | if (config_maxburst >= 16) | 2242 | if (config_maxburst >= 16) |
2344 | psize = STEDMA40_PSIZE_LOG_16; | 2243 | psize = STEDMA40_PSIZE_LOG_16; |
2345 | else if (config_maxburst >= 8) | 2244 | else if (config_maxburst >= 8) |
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2372 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2271 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
2373 | 2272 | ||
2374 | /* Fill in register values */ | 2273 | /* Fill in register values */ |
2375 | if (d40c->log_num != D40_PHY_CHAN) | 2274 | if (chan_is_logical(d40c)) |
2376 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 2275 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2377 | else | 2276 | else |
2378 | d40_phy_cfg(cfg, &d40c->src_def_cfg, | 2277 | d40_phy_cfg(cfg, &d40c->src_def_cfg, |
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2393 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2292 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2394 | unsigned long arg) | 2293 | unsigned long arg) |
2395 | { | 2294 | { |
2396 | unsigned long flags; | ||
2397 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2295 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2398 | 2296 | ||
2399 | if (d40c->phy_chan == NULL) { | 2297 | if (d40c->phy_chan == NULL) { |
2400 | dev_err(&d40c->chan.dev->device, | 2298 | chan_err(d40c, "Channel is not allocated!\n"); |
2401 | "[%s] Channel is not allocated!\n", __func__); | ||
2402 | return -EINVAL; | 2299 | return -EINVAL; |
2403 | } | 2300 | } |
2404 | 2301 | ||
2405 | switch (cmd) { | 2302 | switch (cmd) { |
2406 | case DMA_TERMINATE_ALL: | 2303 | case DMA_TERMINATE_ALL: |
2407 | spin_lock_irqsave(&d40c->lock, flags); | 2304 | return d40_terminate_all(d40c); |
2408 | d40_term_all(d40c); | ||
2409 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2410 | return 0; | ||
2411 | case DMA_PAUSE: | 2305 | case DMA_PAUSE: |
2412 | return d40_pause(chan); | 2306 | return d40_pause(d40c); |
2413 | case DMA_RESUME: | 2307 | case DMA_RESUME: |
2414 | return d40_resume(chan); | 2308 | return d40_resume(d40c); |
2415 | case DMA_SLAVE_CONFIG: | 2309 | case DMA_SLAVE_CONFIG: |
2416 | d40_set_runtime_config(chan, | 2310 | d40_set_runtime_config(chan, |
2417 | (struct dma_slave_config *) arg); | 2311 | (struct dma_slave_config *) arg); |
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2456 | } | 2350 | } |
2457 | } | 2351 | } |
2458 | 2352 | ||
2353 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) | ||
2354 | { | ||
2355 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) | ||
2356 | dev->device_prep_slave_sg = d40_prep_slave_sg; | ||
2357 | |||
2358 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { | ||
2359 | dev->device_prep_dma_memcpy = d40_prep_memcpy; | ||
2360 | |||
2361 | /* | ||
2362 | * This controller can only access addresses at even | ||
2363 | * 32-bit boundaries, i.e. 2^2 | ||
2364 | */ | ||
2365 | dev->copy_align = 2; | ||
2366 | } | ||
2367 | |||
2368 | if (dma_has_cap(DMA_SG, dev->cap_mask)) | ||
2369 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; | ||
2370 | |||
2371 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) | ||
2372 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | ||
2373 | |||
2374 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; | ||
2375 | dev->device_free_chan_resources = d40_free_chan_resources; | ||
2376 | dev->device_issue_pending = d40_issue_pending; | ||
2377 | dev->device_tx_status = d40_tx_status; | ||
2378 | dev->device_control = d40_control; | ||
2379 | dev->dev = base->dev; | ||
2380 | } | ||
2381 | |||
2459 | static int __init d40_dmaengine_init(struct d40_base *base, | 2382 | static int __init d40_dmaengine_init(struct d40_base *base, |
2460 | int num_reserved_chans) | 2383 | int num_reserved_chans) |
2461 | { | 2384 | { |
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2466 | 2389 | ||
2467 | dma_cap_zero(base->dma_slave.cap_mask); | 2390 | dma_cap_zero(base->dma_slave.cap_mask); |
2468 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 2391 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
2392 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | ||
2469 | 2393 | ||
2470 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | 2394 | d40_ops_init(base, &base->dma_slave); |
2471 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | ||
2472 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2473 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
2474 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | ||
2475 | base->dma_slave.device_tx_status = d40_tx_status; | ||
2476 | base->dma_slave.device_issue_pending = d40_issue_pending; | ||
2477 | base->dma_slave.device_control = d40_control; | ||
2478 | base->dma_slave.dev = base->dev; | ||
2479 | 2395 | ||
2480 | err = dma_async_device_register(&base->dma_slave); | 2396 | err = dma_async_device_register(&base->dma_slave); |
2481 | 2397 | ||
2482 | if (err) { | 2398 | if (err) { |
2483 | dev_err(base->dev, | 2399 | d40_err(base->dev, "Failed to register slave channels\n"); |
2484 | "[%s] Failed to register slave channels\n", | ||
2485 | __func__); | ||
2486 | goto failure1; | 2400 | goto failure1; |
2487 | } | 2401 | } |
2488 | 2402 | ||
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2491 | 2405 | ||
2492 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2406 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2493 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2407 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2494 | dma_cap_set(DMA_SG, base->dma_slave.cap_mask); | 2408 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); |
2495 | 2409 | ||
2496 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | 2410 | d40_ops_init(base, &base->dma_memcpy); |
2497 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | ||
2498 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2499 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
2500 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | ||
2501 | base->dma_memcpy.device_tx_status = d40_tx_status; | ||
2502 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | ||
2503 | base->dma_memcpy.device_control = d40_control; | ||
2504 | base->dma_memcpy.dev = base->dev; | ||
2505 | /* | ||
2506 | * This controller can only access address at even | ||
2507 | * 32bit boundaries, i.e. 2^2 | ||
2508 | */ | ||
2509 | base->dma_memcpy.copy_align = 2; | ||
2510 | 2411 | ||
2511 | err = dma_async_device_register(&base->dma_memcpy); | 2412 | err = dma_async_device_register(&base->dma_memcpy); |
2512 | 2413 | ||
2513 | if (err) { | 2414 | if (err) { |
2514 | dev_err(base->dev, | 2415 | d40_err(base->dev, |
2515 | "[%s] Failed to regsiter memcpy only channels\n", | 2416 | "Failed to regsiter memcpy only channels\n"); |
2516 | __func__); | ||
2517 | goto failure2; | 2417 | goto failure2; |
2518 | } | 2418 | } |
2519 | 2419 | ||
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2523 | dma_cap_zero(base->dma_both.cap_mask); | 2423 | dma_cap_zero(base->dma_both.cap_mask); |
2524 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 2424 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2525 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 2425 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
2526 | dma_cap_set(DMA_SG, base->dma_slave.cap_mask); | 2426 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); |
2527 | 2427 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | |
2528 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | 2428 | |
2529 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | 2429 | d40_ops_init(base, &base->dma_both); |
2530 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2531 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
2532 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | ||
2533 | base->dma_both.device_tx_status = d40_tx_status; | ||
2534 | base->dma_both.device_issue_pending = d40_issue_pending; | ||
2535 | base->dma_both.device_control = d40_control; | ||
2536 | base->dma_both.dev = base->dev; | ||
2537 | base->dma_both.copy_align = 2; | ||
2538 | err = dma_async_device_register(&base->dma_both); | 2430 | err = dma_async_device_register(&base->dma_both); |
2539 | 2431 | ||
2540 | if (err) { | 2432 | if (err) { |
2541 | dev_err(base->dev, | 2433 | d40_err(base->dev, |
2542 | "[%s] Failed to register logical and physical capable channels\n", | 2434 | "Failed to register logical and physical capable channels\n"); |
2543 | __func__); | ||
2544 | goto failure3; | 2435 | goto failure3; |
2545 | } | 2436 | } |
2546 | return 0; | 2437 | return 0; |
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2616 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | 2507 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, |
2617 | /* | 2508 | /* |
2618 | * D40_DREG_PERIPHID2 Depends on HW revision: | 2509 | * D40_DREG_PERIPHID2 Depends on HW revision: |
2619 | * MOP500/HREF ED has 0x0008, | 2510 | * DB8500ed has 0x0008, |
2620 | * ? has 0x0018, | 2511 | * ? has 0x0018, |
2621 | * HREF V1 has 0x0028 | 2512 | * DB8500v1 has 0x0028 |
2513 | * DB8500v2 has 0x0038 | ||
2622 | */ | 2514 | */ |
2623 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | 2515 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, |
2624 | 2516 | ||
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2642 | clk = clk_get(&pdev->dev, NULL); | 2534 | clk = clk_get(&pdev->dev, NULL); |
2643 | 2535 | ||
2644 | if (IS_ERR(clk)) { | 2536 | if (IS_ERR(clk)) { |
2645 | dev_err(&pdev->dev, "[%s] No matching clock found\n", | 2537 | d40_err(&pdev->dev, "No matching clock found\n"); |
2646 | __func__); | ||
2647 | goto failure; | 2538 | goto failure; |
2648 | } | 2539 | } |
2649 | 2540 | ||
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2666 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2557 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { |
2667 | if (dma_id_regs[i].val != | 2558 | if (dma_id_regs[i].val != |
2668 | readl(virtbase + dma_id_regs[i].reg)) { | 2559 | readl(virtbase + dma_id_regs[i].reg)) { |
2669 | dev_err(&pdev->dev, | 2560 | d40_err(&pdev->dev, |
2670 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2561 | "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", |
2671 | __func__, | ||
2672 | dma_id_regs[i].val, | 2562 | dma_id_regs[i].val, |
2673 | dma_id_regs[i].reg, | 2563 | dma_id_regs[i].reg, |
2674 | readl(virtbase + dma_id_regs[i].reg)); | 2564 | readl(virtbase + dma_id_regs[i].reg)); |
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2681 | 2571 | ||
2682 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != | 2572 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != |
2683 | D40_HW_DESIGNER) { | 2573 | D40_HW_DESIGNER) { |
2684 | dev_err(&pdev->dev, | 2574 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
2685 | "[%s] Unknown designer! Got %x wanted %x\n", | 2575 | val & D40_DREG_PERIPHID2_DESIGNER_MASK, |
2686 | __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, | ||
2687 | D40_HW_DESIGNER); | 2576 | D40_HW_DESIGNER); |
2688 | goto failure; | 2577 | goto failure; |
2689 | } | 2578 | } |
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2713 | sizeof(struct d40_chan), GFP_KERNEL); | 2602 | sizeof(struct d40_chan), GFP_KERNEL); |
2714 | 2603 | ||
2715 | if (base == NULL) { | 2604 | if (base == NULL) { |
2716 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); | 2605 | d40_err(&pdev->dev, "Out of memory\n"); |
2717 | goto failure; | 2606 | goto failure; |
2718 | } | 2607 | } |
2719 | 2608 | ||
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base) | |||
2860 | 2749 | ||
2861 | static int __init d40_lcla_allocate(struct d40_base *base) | 2750 | static int __init d40_lcla_allocate(struct d40_base *base) |
2862 | { | 2751 | { |
2752 | struct d40_lcla_pool *pool = &base->lcla_pool; | ||
2863 | unsigned long *page_list; | 2753 | unsigned long *page_list; |
2864 | int i, j; | 2754 | int i, j; |
2865 | int ret = 0; | 2755 | int ret = 0; |
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2885 | base->lcla_pool.pages); | 2775 | base->lcla_pool.pages); |
2886 | if (!page_list[i]) { | 2776 | if (!page_list[i]) { |
2887 | 2777 | ||
2888 | dev_err(base->dev, | 2778 | d40_err(base->dev, "Failed to allocate %d pages.\n", |
2889 | "[%s] Failed to allocate %d pages.\n", | 2779 | base->lcla_pool.pages); |
2890 | __func__, base->lcla_pool.pages); | ||
2891 | 2780 | ||
2892 | for (j = 0; j < i; j++) | 2781 | for (j = 0; j < i; j++) |
2893 | free_pages(page_list[j], base->lcla_pool.pages); | 2782 | free_pages(page_list[j], base->lcla_pool.pages); |
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2925 | LCLA_ALIGNMENT); | 2814 | LCLA_ALIGNMENT); |
2926 | } | 2815 | } |
2927 | 2816 | ||
2817 | pool->dma_addr = dma_map_single(base->dev, pool->base, | ||
2818 | SZ_1K * base->num_phy_chans, | ||
2819 | DMA_TO_DEVICE); | ||
2820 | if (dma_mapping_error(base->dev, pool->dma_addr)) { | ||
2821 | pool->dma_addr = 0; | ||
2822 | ret = -ENOMEM; | ||
2823 | goto failure; | ||
2824 | } | ||
2825 | |||
2928 | writel(virt_to_phys(base->lcla_pool.base), | 2826 | writel(virt_to_phys(base->lcla_pool.base), |
2929 | base->virtbase + D40_DREG_LCLA); | 2827 | base->virtbase + D40_DREG_LCLA); |
2930 | failure: | 2828 | failure: |
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2957 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | 2855 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); |
2958 | if (!res) { | 2856 | if (!res) { |
2959 | ret = -ENOENT; | 2857 | ret = -ENOENT; |
2960 | dev_err(&pdev->dev, | 2858 | d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); |
2961 | "[%s] No \"lcpa\" memory resource\n", | ||
2962 | __func__); | ||
2963 | goto failure; | 2859 | goto failure; |
2964 | } | 2860 | } |
2965 | base->lcpa_size = resource_size(res); | 2861 | base->lcpa_size = resource_size(res); |
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2968 | if (request_mem_region(res->start, resource_size(res), | 2864 | if (request_mem_region(res->start, resource_size(res), |
2969 | D40_NAME " I/O lcpa") == NULL) { | 2865 | D40_NAME " I/O lcpa") == NULL) { |
2970 | ret = -EBUSY; | 2866 | ret = -EBUSY; |
2971 | dev_err(&pdev->dev, | 2867 | d40_err(&pdev->dev, |
2972 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", | 2868 | "Failed to request LCPA region 0x%x-0x%x\n", |
2973 | __func__, res->start, res->end); | 2869 | res->start, res->end); |
2974 | goto failure; | 2870 | goto failure; |
2975 | } | 2871 | } |
2976 | 2872 | ||
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2986 | base->lcpa_base = ioremap(res->start, resource_size(res)); | 2882 | base->lcpa_base = ioremap(res->start, resource_size(res)); |
2987 | if (!base->lcpa_base) { | 2883 | if (!base->lcpa_base) { |
2988 | ret = -ENOMEM; | 2884 | ret = -ENOMEM; |
2989 | dev_err(&pdev->dev, | 2885 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
2990 | "[%s] Failed to ioremap LCPA region\n", | ||
2991 | __func__); | ||
2992 | goto failure; | 2886 | goto failure; |
2993 | } | 2887 | } |
2994 | 2888 | ||
2995 | ret = d40_lcla_allocate(base); | 2889 | ret = d40_lcla_allocate(base); |
2996 | if (ret) { | 2890 | if (ret) { |
2997 | dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", | 2891 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
2998 | __func__); | ||
2999 | goto failure; | 2892 | goto failure; |
3000 | } | 2893 | } |
3001 | 2894 | ||
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3004 | base->irq = platform_get_irq(pdev, 0); | 2897 | base->irq = platform_get_irq(pdev, 0); |
3005 | 2898 | ||
3006 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 2899 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); |
3007 | |||
3008 | if (ret) { | 2900 | if (ret) { |
3009 | dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); | 2901 | d40_err(&pdev->dev, "No IRQ defined\n"); |
3010 | goto failure; | 2902 | goto failure; |
3011 | } | 2903 | } |
3012 | 2904 | ||
@@ -3025,6 +2917,12 @@ failure: | |||
3025 | kmem_cache_destroy(base->desc_slab); | 2917 | kmem_cache_destroy(base->desc_slab); |
3026 | if (base->virtbase) | 2918 | if (base->virtbase) |
3027 | iounmap(base->virtbase); | 2919 | iounmap(base->virtbase); |
2920 | |||
2921 | if (base->lcla_pool.dma_addr) | ||
2922 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | ||
2923 | SZ_1K * base->num_phy_chans, | ||
2924 | DMA_TO_DEVICE); | ||
2925 | |||
3028 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) | 2926 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
3029 | free_pages((unsigned long)base->lcla_pool.base, | 2927 | free_pages((unsigned long)base->lcla_pool.base, |
3030 | base->lcla_pool.pages); | 2928 | base->lcla_pool.pages); |
@@ -3049,7 +2947,7 @@ failure: | |||
3049 | kfree(base); | 2947 | kfree(base); |
3050 | } | 2948 | } |
3051 | 2949 | ||
3052 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); | 2950 | d40_err(&pdev->dev, "probe failed\n"); |
3053 | return ret; | 2951 | return ret; |
3054 | } | 2952 | } |
3055 | 2953 | ||
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = { | |||
3060 | }, | 2958 | }, |
3061 | }; | 2959 | }; |
3062 | 2960 | ||
3063 | int __init stedma40_init(void) | 2961 | static int __init stedma40_init(void) |
3064 | { | 2962 | { |
3065 | return platform_driver_probe(&d40_driver, d40_probe); | 2963 | return platform_driver_probe(&d40_driver, d40_probe); |
3066 | } | 2964 | } |
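
Not part of the patch itself: a minimal client-side sketch of how the DMA_CYCLIC capability wired up above might be used once a dma40 channel has been requested and configured. The channel, buffer address and callback names are hypothetical, and buf_len is assumed to be an exact multiple of period_len, since dma40_prep_dma_cyclic() simply splits the buffer into buf_len / period_len periods.

	struct dma_async_tx_descriptor *txd;

	/* e.g. a 4 KiB audio ring split into eight 512-byte periods */
	txd = chan->device->device_prep_dma_cyclic(chan, buf_dma_addr,
						    4096, 512, DMA_TO_DEVICE);
	if (txd) {
		txd->callback = period_done_cb;	/* hypothetical per-period callback */
		txd->callback_param = drv_data;	/* hypothetical driver context */
		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
	}
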
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 0b096a38322d..cad9e1daedff 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
125 | static int d40_phy_fill_lli(struct d40_phy_lli *lli, | 125 | static int d40_phy_fill_lli(struct d40_phy_lli *lli, |
126 | dma_addr_t data, | 126 | dma_addr_t data, |
127 | u32 data_size, | 127 | u32 data_size, |
128 | int psize, | ||
129 | dma_addr_t next_lli, | 128 | dma_addr_t next_lli, |
130 | u32 reg_cfg, | 129 | u32 reg_cfg, |
131 | bool term_int, | 130 | struct stedma40_half_channel_info *info, |
132 | u32 data_width, | 131 | unsigned int flags) |
133 | bool is_device) | ||
134 | { | 132 | { |
133 | bool addr_inc = flags & LLI_ADDR_INC; | ||
134 | bool term_int = flags & LLI_TERM_INT; | ||
135 | unsigned int data_width = info->data_width; | ||
136 | int psize = info->psize; | ||
135 | int num_elems; | 137 | int num_elems; |
136 | 138 | ||
137 | if (psize == STEDMA40_PSIZE_PHY_1) | 139 | if (psize == STEDMA40_PSIZE_PHY_1) |
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
154 | * Distance to next element sized entry. | 156 | * Distance to next element sized entry. |
155 | * Usually the size of the element unless you want gaps. | 157 | * Usually the size of the element unless you want gaps. |
156 | */ | 158 | */ |
157 | if (!is_device) | 159 | if (addr_inc) |
158 | lli->reg_elt |= (0x1 << data_width) << | 160 | lli->reg_elt |= (0x1 << data_width) << |
159 | D40_SREG_ELEM_PHY_EIDX_POS; | 161 | D40_SREG_ELEM_PHY_EIDX_POS; |
160 | 162 | ||
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2) | |||
198 | return seg_max; | 200 | return seg_max; |
199 | } | 201 | } |
200 | 202 | ||
201 | struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, | 203 | static struct d40_phy_lli * |
202 | dma_addr_t addr, | 204 | d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, |
203 | u32 size, | 205 | dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg, |
204 | int psize, | 206 | struct stedma40_half_channel_info *info, |
205 | dma_addr_t lli_phys, | 207 | struct stedma40_half_channel_info *otherinfo, |
206 | u32 reg_cfg, | 208 | unsigned long flags) |
207 | bool term_int, | ||
208 | u32 data_width1, | ||
209 | u32 data_width2, | ||
210 | bool is_device) | ||
211 | { | 209 | { |
210 | bool lastlink = flags & LLI_LAST_LINK; | ||
211 | bool addr_inc = flags & LLI_ADDR_INC; | ||
212 | bool term_int = flags & LLI_TERM_INT; | ||
213 | bool cyclic = flags & LLI_CYCLIC; | ||
212 | int err; | 214 | int err; |
213 | dma_addr_t next = lli_phys; | 215 | dma_addr_t next = lli_phys; |
214 | int size_rest = size; | 216 | int size_rest = size; |
215 | int size_seg = 0; | 217 | int size_seg = 0; |
216 | 218 | ||
219 | /* | ||
220 | * This piece may be split up based on d40_seg_size(); we only want the | ||
221 | * term int on the last part. | ||
222 | */ | ||
223 | if (term_int) | ||
224 | flags &= ~LLI_TERM_INT; | ||
225 | |||
217 | do { | 226 | do { |
218 | size_seg = d40_seg_size(size_rest, data_width1, data_width2); | 227 | size_seg = d40_seg_size(size_rest, info->data_width, |
228 | otherinfo->data_width); | ||
219 | size_rest -= size_seg; | 229 | size_rest -= size_seg; |
220 | 230 | ||
221 | if (term_int && size_rest == 0) | 231 | if (size_rest == 0 && term_int) |
222 | next = 0; | 232 | flags |= LLI_TERM_INT; |
233 | |||
234 | if (size_rest == 0 && lastlink) | ||
235 | next = cyclic ? first_phys : 0; | ||
223 | else | 236 | else |
224 | next = ALIGN(next + sizeof(struct d40_phy_lli), | 237 | next = ALIGN(next + sizeof(struct d40_phy_lli), |
225 | D40_LLI_ALIGN); | 238 | D40_LLI_ALIGN); |
226 | 239 | ||
227 | err = d40_phy_fill_lli(lli, | 240 | err = d40_phy_fill_lli(lli, addr, size_seg, next, |
228 | addr, | 241 | reg_cfg, info, flags); |
229 | size_seg, | ||
230 | psize, | ||
231 | next, | ||
232 | reg_cfg, | ||
233 | !next, | ||
234 | data_width1, | ||
235 | is_device); | ||
236 | 242 | ||
237 | if (err) | 243 | if (err) |
238 | goto err; | 244 | goto err; |
239 | 245 | ||
240 | lli++; | 246 | lli++; |
241 | if (!is_device) | 247 | if (addr_inc) |
242 | addr += size_seg; | 248 | addr += size_seg; |
243 | } while (size_rest); | 249 | } while (size_rest); |
244 | 250 | ||
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
254 | struct d40_phy_lli *lli_sg, | 260 | struct d40_phy_lli *lli_sg, |
255 | dma_addr_t lli_phys, | 261 | dma_addr_t lli_phys, |
256 | u32 reg_cfg, | 262 | u32 reg_cfg, |
257 | u32 data_width1, | 263 | struct stedma40_half_channel_info *info, |
258 | u32 data_width2, | 264 | struct stedma40_half_channel_info *otherinfo, |
259 | int psize) | 265 | unsigned long flags) |
260 | { | 266 | { |
261 | int total_size = 0; | 267 | int total_size = 0; |
262 | int i; | 268 | int i; |
263 | struct scatterlist *current_sg = sg; | 269 | struct scatterlist *current_sg = sg; |
264 | dma_addr_t dst; | ||
265 | struct d40_phy_lli *lli = lli_sg; | 270 | struct d40_phy_lli *lli = lli_sg; |
266 | dma_addr_t l_phys = lli_phys; | 271 | dma_addr_t l_phys = lli_phys; |
267 | 272 | ||
273 | if (!target) | ||
274 | flags |= LLI_ADDR_INC; | ||
275 | |||
268 | for_each_sg(sg, current_sg, sg_len, i) { | 276 | for_each_sg(sg, current_sg, sg_len, i) { |
277 | dma_addr_t sg_addr = sg_dma_address(current_sg); | ||
278 | unsigned int len = sg_dma_len(current_sg); | ||
279 | dma_addr_t dst = target ?: sg_addr; | ||
269 | 280 | ||
270 | total_size += sg_dma_len(current_sg); | 281 | total_size += sg_dma_len(current_sg); |
271 | 282 | ||
272 | if (target) | 283 | if (i == sg_len - 1) |
273 | dst = target; | 284 | flags |= LLI_TERM_INT | LLI_LAST_LINK; |
274 | else | ||
275 | dst = sg_phys(current_sg); | ||
276 | 285 | ||
277 | l_phys = ALIGN(lli_phys + (lli - lli_sg) * | 286 | l_phys = ALIGN(lli_phys + (lli - lli_sg) * |
278 | sizeof(struct d40_phy_lli), D40_LLI_ALIGN); | 287 | sizeof(struct d40_phy_lli), D40_LLI_ALIGN); |
279 | 288 | ||
280 | lli = d40_phy_buf_to_lli(lli, | 289 | lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys, |
281 | dst, | 290 | reg_cfg, info, otherinfo, flags); |
282 | sg_dma_len(current_sg), | 291 | |
283 | psize, | ||
284 | l_phys, | ||
285 | reg_cfg, | ||
286 | sg_len - 1 == i, | ||
287 | data_width1, | ||
288 | data_width2, | ||
289 | target == dst); | ||
290 | if (lli == NULL) | 292 | if (lli == NULL) |
291 | return -EINVAL; | 293 | return -EINVAL; |
292 | } | 294 | } |
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
295 | } | 297 | } |
296 | 298 | ||
297 | 299 | ||
298 | void d40_phy_lli_write(void __iomem *virtbase, | ||
299 | u32 phy_chan_num, | ||
300 | struct d40_phy_lli *lli_dst, | ||
301 | struct d40_phy_lli *lli_src) | ||
302 | { | ||
303 | |||
304 | writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + | ||
305 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); | ||
306 | writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + | ||
307 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | ||
308 | writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + | ||
309 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); | ||
310 | writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + | ||
311 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); | ||
312 | |||
313 | writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + | ||
314 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); | ||
315 | writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + | ||
316 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | ||
317 | writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + | ||
318 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); | ||
319 | writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + | ||
320 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); | ||
321 | |||
322 | } | ||
323 | |||
324 | /* DMA logical lli operations */ | 300 | /* DMA logical lli operations */ |
325 | 301 | ||
326 | static void d40_log_lli_link(struct d40_log_lli *lli_dst, | 302 | static void d40_log_lli_link(struct d40_log_lli *lli_dst, |
327 | struct d40_log_lli *lli_src, | 303 | struct d40_log_lli *lli_src, |
328 | int next) | 304 | int next, unsigned int flags) |
329 | { | 305 | { |
306 | bool interrupt = flags & LLI_TERM_INT; | ||
330 | u32 slos = 0; | 307 | u32 slos = 0; |
331 | u32 dlos = 0; | 308 | u32 dlos = 0; |
332 | 309 | ||
333 | if (next != -EINVAL) { | 310 | if (next != -EINVAL) { |
334 | slos = next * 2; | 311 | slos = next * 2; |
335 | dlos = next * 2 + 1; | 312 | dlos = next * 2 + 1; |
336 | } else { | 313 | } |
314 | |||
315 | if (interrupt) { | ||
337 | lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; | 316 | lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; |
338 | lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; | 317 | lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; |
339 | } | 318 | } |
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst, | |||
348 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | 327 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, |
349 | struct d40_log_lli *lli_dst, | 328 | struct d40_log_lli *lli_dst, |
350 | struct d40_log_lli *lli_src, | 329 | struct d40_log_lli *lli_src, |
351 | int next) | 330 | int next, unsigned int flags) |
352 | { | 331 | { |
353 | d40_log_lli_link(lli_dst, lli_src, next); | 332 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
354 | 333 | ||
355 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); | 334 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); |
356 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); | 335 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); |
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | |||
361 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | 340 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, |
362 | struct d40_log_lli *lli_dst, | 341 | struct d40_log_lli *lli_dst, |
363 | struct d40_log_lli *lli_src, | 342 | struct d40_log_lli *lli_src, |
364 | int next) | 343 | int next, unsigned int flags) |
365 | { | 344 | { |
366 | d40_log_lli_link(lli_dst, lli_src, next); | 345 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
367 | 346 | ||
368 | writel(lli_src->lcsp02, &lcla[0].lcsp02); | 347 | writel(lli_src->lcsp02, &lcla[0].lcsp02); |
369 | writel(lli_src->lcsp13, &lcla[0].lcsp13); | 348 | writel(lli_src->lcsp13, &lcla[0].lcsp13); |
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, | |||
375 | dma_addr_t data, u32 data_size, | 354 | dma_addr_t data, u32 data_size, |
376 | u32 reg_cfg, | 355 | u32 reg_cfg, |
377 | u32 data_width, | 356 | u32 data_width, |
378 | bool addr_inc) | 357 | unsigned int flags) |
379 | { | 358 | { |
359 | bool addr_inc = flags & LLI_ADDR_INC; | ||
360 | |||
380 | lli->lcsp13 = reg_cfg; | 361 | lli->lcsp13 = reg_cfg; |
381 | 362 | ||
382 | /* The number of elements to transfer */ | 363 | /* The number of elements to transfer */ |
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, | |||
395 | 376 | ||
396 | } | 377 | } |
397 | 378 | ||
398 | int d40_log_sg_to_dev(struct scatterlist *sg, | 379 | static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, |
399 | int sg_len, | ||
400 | struct d40_log_lli_bidir *lli, | ||
401 | struct d40_def_lcsp *lcsp, | ||
402 | u32 src_data_width, | ||
403 | u32 dst_data_width, | ||
404 | enum dma_data_direction direction, | ||
405 | dma_addr_t dev_addr) | ||
406 | { | ||
407 | int total_size = 0; | ||
408 | struct scatterlist *current_sg = sg; | ||
409 | int i; | ||
410 | struct d40_log_lli *lli_src = lli->src; | ||
411 | struct d40_log_lli *lli_dst = lli->dst; | ||
412 | |||
413 | for_each_sg(sg, current_sg, sg_len, i) { | ||
414 | total_size += sg_dma_len(current_sg); | ||
415 | |||
416 | if (direction == DMA_TO_DEVICE) { | ||
417 | lli_src = | ||
418 | d40_log_buf_to_lli(lli_src, | ||
419 | sg_phys(current_sg), | ||
420 | sg_dma_len(current_sg), | ||
421 | lcsp->lcsp1, src_data_width, | ||
422 | dst_data_width, | ||
423 | true); | ||
424 | lli_dst = | ||
425 | d40_log_buf_to_lli(lli_dst, | ||
426 | dev_addr, | ||
427 | sg_dma_len(current_sg), | ||
428 | lcsp->lcsp3, dst_data_width, | ||
429 | src_data_width, | ||
430 | false); | ||
431 | } else { | ||
432 | lli_dst = | ||
433 | d40_log_buf_to_lli(lli_dst, | ||
434 | sg_phys(current_sg), | ||
435 | sg_dma_len(current_sg), | ||
436 | lcsp->lcsp3, dst_data_width, | ||
437 | src_data_width, | ||
438 | true); | ||
439 | lli_src = | ||
440 | d40_log_buf_to_lli(lli_src, | ||
441 | dev_addr, | ||
442 | sg_dma_len(current_sg), | ||
443 | lcsp->lcsp1, src_data_width, | ||
444 | dst_data_width, | ||
445 | false); | ||
446 | } | ||
447 | } | ||
448 | return total_size; | ||
449 | } | ||
450 | |||
451 | struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, | ||
452 | dma_addr_t addr, | 380 | dma_addr_t addr, |
453 | int size, | 381 | int size, |
454 | u32 lcsp13, /* src or dst*/ | 382 | u32 lcsp13, /* src or dst*/ |
455 | u32 data_width1, | 383 | u32 data_width1, |
456 | u32 data_width2, | 384 | u32 data_width2, |
457 | bool addr_inc) | 385 | unsigned int flags) |
458 | { | 386 | { |
387 | bool addr_inc = flags & LLI_ADDR_INC; | ||
459 | struct d40_log_lli *lli = lli_sg; | 388 | struct d40_log_lli *lli = lli_sg; |
460 | int size_rest = size; | 389 | int size_rest = size; |
461 | int size_seg = 0; | 390 | int size_seg = 0; |
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, | |||
468 | addr, | 397 | addr, |
469 | size_seg, | 398 | size_seg, |
470 | lcsp13, data_width1, | 399 | lcsp13, data_width1, |
471 | addr_inc); | 400 | flags); |
472 | if (addr_inc) | 401 | if (addr_inc) |
473 | addr += size_seg; | 402 | addr += size_seg; |
474 | lli++; | 403 | lli++; |
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, | |||
479 | 408 | ||
480 | int d40_log_sg_to_lli(struct scatterlist *sg, | 409 | int d40_log_sg_to_lli(struct scatterlist *sg, |
481 | int sg_len, | 410 | int sg_len, |
411 | dma_addr_t dev_addr, | ||
482 | struct d40_log_lli *lli_sg, | 412 | struct d40_log_lli *lli_sg, |
483 | u32 lcsp13, /* src or dst*/ | 413 | u32 lcsp13, /* src or dst*/ |
484 | u32 data_width1, u32 data_width2) | 414 | u32 data_width1, u32 data_width2) |
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg, | |||
487 | struct scatterlist *current_sg = sg; | 417 | struct scatterlist *current_sg = sg; |
488 | int i; | 418 | int i; |
489 | struct d40_log_lli *lli = lli_sg; | 419 | struct d40_log_lli *lli = lli_sg; |
420 | unsigned long flags = 0; | ||
421 | |||
422 | if (!dev_addr) | ||
423 | flags |= LLI_ADDR_INC; | ||
490 | 424 | ||
491 | for_each_sg(sg, current_sg, sg_len, i) { | 425 | for_each_sg(sg, current_sg, sg_len, i) { |
426 | dma_addr_t sg_addr = sg_dma_address(current_sg); | ||
427 | unsigned int len = sg_dma_len(current_sg); | ||
428 | dma_addr_t addr = dev_addr ?: sg_addr; | ||
429 | |||
492 | total_size += sg_dma_len(current_sg); | 430 | total_size += sg_dma_len(current_sg); |
493 | lli = d40_log_buf_to_lli(lli, | 431 | |
494 | sg_phys(current_sg), | 432 | lli = d40_log_buf_to_lli(lli, addr, len, |
495 | sg_dma_len(current_sg), | ||
496 | lcsp13, | 433 | lcsp13, |
497 | data_width1, data_width2, true); | 434 | data_width1, |
435 | data_width2, | ||
436 | flags); | ||
498 | } | 437 | } |
438 | |||
499 | return total_size; | 439 | return total_size; |
500 | } | 440 | } |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9cc43495bea2..195ee65ee7f3 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -163,6 +163,22 @@ | |||
163 | #define D40_DREG_LCEIS1 0x0B4 | 163 | #define D40_DREG_LCEIS1 0x0B4 |
164 | #define D40_DREG_LCEIS2 0x0B8 | 164 | #define D40_DREG_LCEIS2 0x0B8 |
165 | #define D40_DREG_LCEIS3 0x0BC | 165 | #define D40_DREG_LCEIS3 0x0BC |
166 | #define D40_DREG_PSEG1 0x110 | ||
167 | #define D40_DREG_PSEG2 0x114 | ||
168 | #define D40_DREG_PSEG3 0x118 | ||
169 | #define D40_DREG_PSEG4 0x11C | ||
170 | #define D40_DREG_PCEG1 0x120 | ||
171 | #define D40_DREG_PCEG2 0x124 | ||
172 | #define D40_DREG_PCEG3 0x128 | ||
173 | #define D40_DREG_PCEG4 0x12C | ||
174 | #define D40_DREG_RSEG1 0x130 | ||
175 | #define D40_DREG_RSEG2 0x134 | ||
176 | #define D40_DREG_RSEG3 0x138 | ||
177 | #define D40_DREG_RSEG4 0x13C | ||
178 | #define D40_DREG_RCEG1 0x140 | ||
179 | #define D40_DREG_RCEG2 0x144 | ||
180 | #define D40_DREG_RCEG3 0x148 | ||
181 | #define D40_DREG_RCEG4 0x14C | ||
166 | #define D40_DREG_STFU 0xFC8 | 182 | #define D40_DREG_STFU 0xFC8 |
167 | #define D40_DREG_ICFG 0xFCC | 183 | #define D40_DREG_ICFG 0xFCC |
168 | #define D40_DREG_PERIPHID0 0xFE0 | 184 | #define D40_DREG_PERIPHID0 0xFE0 |
@@ -277,6 +293,13 @@ struct d40_def_lcsp { | |||
277 | 293 | ||
278 | /* Physical channels */ | 294 | /* Physical channels */ |
279 | 295 | ||
296 | enum d40_lli_flags { | ||
297 | LLI_ADDR_INC = 1 << 0, | ||
298 | LLI_TERM_INT = 1 << 1, | ||
299 | LLI_CYCLIC = 1 << 2, | ||
300 | LLI_LAST_LINK = 1 << 3, | ||
301 | }; | ||
302 | |||
280 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | 303 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, |
281 | u32 *src_cfg, | 304 | u32 *src_cfg, |
282 | u32 *dst_cfg, | 305 | u32 *dst_cfg, |
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
292 | struct d40_phy_lli *lli, | 315 | struct d40_phy_lli *lli, |
293 | dma_addr_t lli_phys, | 316 | dma_addr_t lli_phys, |
294 | u32 reg_cfg, | 317 | u32 reg_cfg, |
295 | u32 data_width1, | 318 | struct stedma40_half_channel_info *info, |
296 | u32 data_width2, | 319 | struct stedma40_half_channel_info *otherinfo, |
297 | int psize); | 320 | unsigned long flags); |
298 | |||
299 | struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, | ||
300 | dma_addr_t data, | ||
301 | u32 data_size, | ||
302 | int psize, | ||
303 | dma_addr_t next_lli, | ||
304 | u32 reg_cfg, | ||
305 | bool term_int, | ||
306 | u32 data_width1, | ||
307 | u32 data_width2, | ||
308 | bool is_device); | ||
309 | |||
310 | void d40_phy_lli_write(void __iomem *virtbase, | ||
311 | u32 phy_chan_num, | ||
312 | struct d40_phy_lli *lli_dst, | ||
313 | struct d40_phy_lli *lli_src); | ||
314 | 321 | ||
315 | /* Logical channels */ | 322 | /* Logical channels */ |
316 | 323 | ||
317 | struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, | ||
318 | dma_addr_t addr, | ||
319 | int size, | ||
320 | u32 lcsp13, /* src or dst*/ | ||
321 | u32 data_width1, u32 data_width2, | ||
322 | bool addr_inc); | ||
323 | |||
324 | int d40_log_sg_to_dev(struct scatterlist *sg, | ||
325 | int sg_len, | ||
326 | struct d40_log_lli_bidir *lli, | ||
327 | struct d40_def_lcsp *lcsp, | ||
328 | u32 src_data_width, | ||
329 | u32 dst_data_width, | ||
330 | enum dma_data_direction direction, | ||
331 | dma_addr_t dev_addr); | ||
332 | |||
333 | int d40_log_sg_to_lli(struct scatterlist *sg, | 324 | int d40_log_sg_to_lli(struct scatterlist *sg, |
334 | int sg_len, | 325 | int sg_len, |
326 | dma_addr_t dev_addr, | ||
335 | struct d40_log_lli *lli_sg, | 327 | struct d40_log_lli *lli_sg, |
336 | u32 lcsp13, /* src or dst*/ | 328 | u32 lcsp13, /* src or dst*/ |
337 | u32 data_width1, u32 data_width2); | 329 | u32 data_width1, u32 data_width2); |
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg, | |||
339 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | 331 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, |
340 | struct d40_log_lli *lli_dst, | 332 | struct d40_log_lli *lli_dst, |
341 | struct d40_log_lli *lli_src, | 333 | struct d40_log_lli *lli_src, |
342 | int next); | 334 | int next, unsigned int flags); |
343 | 335 | ||
344 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | 336 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, |
345 | struct d40_log_lli *lli_dst, | 337 | struct d40_log_lli *lli_dst, |
346 | struct d40_log_lli *lli_src, | 338 | struct d40_log_lli *lli_src, |
347 | int next); | 339 | int next, unsigned int flags); |
348 | 340 | ||
349 | #endif /* STE_DMA40_LLI_H */ | 341 | #endif /* STE_DMA40_LLI_H */ |
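
Not from the patch: a short sketch of how the new d40_lli_flags bits above are meant to combine when ste_dma40.c builds a link list; the condition names are illustrative only.

	unsigned long flags = 0;

	if (addr_is_memory)		/* walk the buffer rather than a device FIFO */
		flags |= LLI_ADDR_INC;
	if (is_last_link)		/* interrupt on, and stop or loop at, the last LLI */
		flags |= LLI_TERM_INT | LLI_LAST_LINK;
	if (is_cyclic)			/* last LLI links back to the first instead of 0 */
		flags |= LLI_CYCLIC;
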
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index c8aad713a046..6998d9376ef9 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -16,9 +16,18 @@ | |||
16 | /** | 16 | /** |
17 | * struct dw_dma_platform_data - Controller configuration parameters | 17 | * struct dw_dma_platform_data - Controller configuration parameters |
18 | * @nr_channels: Number of channels supported by hardware (max 8) | 18 | * @nr_channels: Number of channels supported by hardware (max 8) |
19 | * @is_private: The device channels should be marked as private and not for use | ||
20 | * by the general purpose DMA channel allocator. | ||
19 | */ | 21 | */ |
20 | struct dw_dma_platform_data { | 22 | struct dw_dma_platform_data { |
21 | unsigned int nr_channels; | 23 | unsigned int nr_channels; |
24 | bool is_private; | ||
25 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | ||
26 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | ||
27 | unsigned char chan_allocation_order; | ||
28 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | ||
29 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | ||
30 | unsigned char chan_priority; | ||
22 | }; | 31 | }; |
23 | 32 | ||
24 | /** | 33 | /** |
@@ -33,6 +42,30 @@ enum dw_dma_slave_width { | |||
33 | DW_DMA_SLAVE_WIDTH_32BIT, | 42 | DW_DMA_SLAVE_WIDTH_32BIT, |
34 | }; | 43 | }; |
35 | 44 | ||
45 | /* bursts size */ | ||
46 | enum dw_dma_msize { | ||
47 | DW_DMA_MSIZE_1, | ||
48 | DW_DMA_MSIZE_4, | ||
49 | DW_DMA_MSIZE_8, | ||
50 | DW_DMA_MSIZE_16, | ||
51 | DW_DMA_MSIZE_32, | ||
52 | DW_DMA_MSIZE_64, | ||
53 | DW_DMA_MSIZE_128, | ||
54 | DW_DMA_MSIZE_256, | ||
55 | }; | ||
56 | |||
57 | /* flow controller */ | ||
58 | enum dw_dma_fc { | ||
59 | DW_DMA_FC_D_M2M, | ||
60 | DW_DMA_FC_D_M2P, | ||
61 | DW_DMA_FC_D_P2M, | ||
62 | DW_DMA_FC_D_P2P, | ||
63 | DW_DMA_FC_P_P2M, | ||
64 | DW_DMA_FC_SP_P2P, | ||
65 | DW_DMA_FC_P_M2P, | ||
66 | DW_DMA_FC_DP_P2P, | ||
67 | }; | ||
68 | |||
36 | /** | 69 | /** |
37 | * struct dw_dma_slave - Controller-specific information about a slave | 70 | * struct dw_dma_slave - Controller-specific information about a slave |
38 | * | 71 | * |
@@ -44,6 +77,11 @@ enum dw_dma_slave_width { | |||
44 | * @reg_width: peripheral register width | 77 | * @reg_width: peripheral register width |
45 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | 78 | * @cfg_hi: Platform-specific initializer for the CFG_HI register |
46 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | 79 | * @cfg_lo: Platform-specific initializer for the CFG_LO register |
80 | * @src_master: src master for transfers on allocated channel. | ||
81 | * @dst_master: dest master for transfers on allocated channel. | ||
82 | * @src_msize: src burst size. | ||
83 | * @dst_msize: dest burst size. | ||
84 | * @fc: flow controller for DMA transfer | ||
47 | */ | 85 | */ |
48 | struct dw_dma_slave { | 86 | struct dw_dma_slave { |
49 | struct device *dma_dev; | 87 | struct device *dma_dev; |
@@ -52,6 +90,11 @@ struct dw_dma_slave { | |||
52 | enum dw_dma_slave_width reg_width; | 90 | enum dw_dma_slave_width reg_width; |
53 | u32 cfg_hi; | 91 | u32 cfg_hi; |
54 | u32 cfg_lo; | 92 | u32 cfg_lo; |
93 | u8 src_master; | ||
94 | u8 dst_master; | ||
95 | u8 src_msize; | ||
96 | u8 dst_msize; | ||
97 | u8 fc; | ||
55 | }; | 98 | }; |
56 | 99 | ||
57 | /* Platform-configurable bits in CFG_HI */ | 100 | /* Platform-configurable bits in CFG_HI */ |
@@ -62,7 +105,6 @@ struct dw_dma_slave { | |||
62 | #define DWC_CFGH_DST_PER(x) ((x) << 11) | 105 | #define DWC_CFGH_DST_PER(x) ((x) << 11) |
63 | 106 | ||
64 | /* Platform-configurable bits in CFG_LO */ | 107 | /* Platform-configurable bits in CFG_LO */ |
65 | #define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ | ||
66 | #define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ | 108 | #define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ |
67 | #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) | 109 | #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) |
68 | #define DWC_CFGL_LOCK_CH_XACT (2 << 12) | 110 | #define DWC_CFGL_LOCK_CH_XACT (2 << 12) |
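
Not part of the patch: an illustrative board-file sketch of how the new dw_dmac platform and slave fields might be filled in; the peripheral, request line and master numbers are hypothetical.

static struct dw_dma_platform_data dw_pdata = {
	.nr_channels		= 8,
	.is_private		= true,
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	.chan_priority		= CHAN_PRIORITY_ASCENDING,
};

static struct dw_dma_slave uart_tx_dma = {
	.reg_width	= DW_DMA_SLAVE_WIDTH_8BIT,
	.cfg_hi		= DWC_CFGH_DST_PER(3),	/* hypothetical request line */
	.src_master	= 0,
	.dst_master	= 1,
	.src_msize	= DW_DMA_MSIZE_8,
	.dst_msize	= DW_DMA_MSIZE_8,
	.fc		= DW_DMA_FC_D_M2P,	/* DMAC as flow controller, mem-to-periph */
};
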