author    Linus Walleij <linus.walleij@stericsson.com>    2010-03-30 09:33:42 -0400
committer Dan Williams <dan.j.williams@intel.com>    2010-04-14 17:49:20 -0400
commit    8d318a50b3d72e3daf94131f91e1ab799a8d5ad4
tree      ae36452931d2e836f725b3f91eebd7f4d9e27589
parent    6a3cd3ea48584d14f60dce0b3c4e9e4428beb0fe
DMAENGINE: Support for ST-Ericssons DMA40 block v3
This is a straightforward driver for the ST-Ericsson DMA40 DMA controller found in U8500, implemented akin to the existing COH 901 318 driver.

Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Acked-by: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
Cc: STEricsson_nomadik_linux@list.st.com
Cc: Alessandro Rubini <rubini@unipv.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  arch/arm/plat-nomadik/include/plat/ste_dma40.h |  239
-rw-r--r--  drivers/dma/Kconfig                            |    7
-rw-r--r--  drivers/dma/Makefile                           |    1
-rw-r--r--  drivers/dma/ste_dma40.c                        | 2596
-rw-r--r--  drivers/dma/ste_dma40_ll.c                     |  454
-rw-r--r--  drivers/dma/ste_dma40_ll.h                     |  354
6 files changed, 3651 insertions(+), 0 deletions(-)
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
new file mode 100644
index 000000000000..4d12ea4ca361
--- /dev/null
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -0,0 +1,239 @@
1/*
2 * arch/arm/plat-nomadik/include/plat/ste_dma40.h
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */
9
10
11#ifndef STE_DMA40_H
12#define STE_DMA40_H
13
14#include <linux/dmaengine.h>
15#include <linux/workqueue.h>
16#include <linux/interrupt.h>
17#include <linux/dmaengine.h>
18
19/* dev types for memcpy */
20#define STEDMA40_DEV_DST_MEMORY (-1)
21#define STEDMA40_DEV_SRC_MEMORY (-1)
22
23/*
24 * Description of bitfields of channel_type variable is available in
25 * the info structure.
26 */
27
28/* Priority */
29#define STEDMA40_INFO_PRIO_TYPE_POS 2
30#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
31#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
32
33/* Mode */
34#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
35#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
36#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
37#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
38
39/* Mode options */
40#define STEDMA40_INFO_CH_MODE_OPT_POS 8
41#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
42#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
43#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
44#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
45#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
46#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
47
48/* Interrupt */
49#define STEDMA40_INFO_TIM_POS 10
50#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
51#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
52
53/* End of channel_type configuration */
54
55#define STEDMA40_ESIZE_8_BIT 0x0
56#define STEDMA40_ESIZE_16_BIT 0x1
57#define STEDMA40_ESIZE_32_BIT 0x2
58#define STEDMA40_ESIZE_64_BIT 0x3
59
60/* The value 4 indicates that PEN-reg shall be set to 0 */
61#define STEDMA40_PSIZE_PHY_1 0x4
62#define STEDMA40_PSIZE_PHY_2 0x0
63#define STEDMA40_PSIZE_PHY_4 0x1
64#define STEDMA40_PSIZE_PHY_8 0x2
65#define STEDMA40_PSIZE_PHY_16 0x3
66
67/*
68 * The number of elements differ in logical and
69 * physical mode
70 */
71#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
72#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
73#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
74#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
75
76enum stedma40_flow_ctrl {
77 STEDMA40_NO_FLOW_CTRL,
78 STEDMA40_FLOW_CTRL,
79};
80
81enum stedma40_endianess {
82 STEDMA40_LITTLE_ENDIAN,
83 STEDMA40_BIG_ENDIAN
84};
85
86enum stedma40_periph_data_width {
87 STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
88 STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
89 STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT,
90 STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
91};
92
93struct stedma40_half_channel_info {
94 enum stedma40_endianess endianess;
95 enum stedma40_periph_data_width data_width;
96 int psize;
97 enum stedma40_flow_ctrl flow_ctrl;
98};
99
100enum stedma40_xfer_dir {
101 STEDMA40_MEM_TO_MEM,
102 STEDMA40_MEM_TO_PERIPH,
103 STEDMA40_PERIPH_TO_MEM,
104 STEDMA40_PERIPH_TO_PERIPH
105};
106
107
108/**
109 * struct stedma40_chan_cfg - Structure to be filled by client drivers.
110 *
111 * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
112 * @channel_type: priority, mode, mode options and interrupt configuration.
113 * @src_dev_type: Src device type
114 * @dst_dev_type: Dst device type
115 * @src_info: Parameters for src half channel
116 * @dst_info: Parameters for dst half channel
117 * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
118 * @pre_transfer: Callback used if needed before preparation of transfer.
119 * Only called if a device is set. The size argument is the number of bytes
120 * to transfer (for a multi-element transfer, the size of the first element).
121 *
122 *
123 * This structure has to be filled by the client drivers.
124 * It is recommended to do all dma configuration for clients in the machine (board) code.
125 *
126 */
127struct stedma40_chan_cfg {
128 enum stedma40_xfer_dir dir;
129 unsigned int channel_type;
130 int src_dev_type;
131 int dst_dev_type;
132 struct stedma40_half_channel_info src_info;
133 struct stedma40_half_channel_info dst_info;
134 void *pre_transfer_data;
135 int (*pre_transfer) (struct dma_chan *chan,
136 void *data,
137 int size);
138};
139
140/**
141 * struct stedma40_platform_data - Configuration struct for the dma device.
142 *
143 * @dev_len: length of dev_tx and dev_rx
144 * @dev_tx: mapping between destination event line and io address
145 * @dev_rx: mapping between source event line and io address
146 * @memcpy: list of memcpy event lines
147 * @memcpy_len: length of memcpy
148 * @memcpy_conf_phy: default configuration of physical channel memcpy
149 * @memcpy_conf_log: default configuration of logical channel memcpy
150 * @llis_per_log: number of max linked list items per logical channel
151 *
152 */
153struct stedma40_platform_data {
154 u32 dev_len;
155 const dma_addr_t *dev_tx;
156 const dma_addr_t *dev_rx;
157 int *memcpy;
158 u32 memcpy_len;
159 struct stedma40_chan_cfg *memcpy_conf_phy;
160 struct stedma40_chan_cfg *memcpy_conf_log;
161 unsigned int llis_per_log;
162};
163
164/**
165 * stedma40_set_psize() - Used for changing the package size of an
166 * already configured dma channel.
167 *
168 * @chan: dmaengine handle
169 * @src_psize: new package size for src. (STEDMA40_PSIZE*)
170 * @dst_psize: new package size for dst. (STEDMA40_PSIZE*)
171 *
172 * Returns 0 on success, otherwise a negative error number.
173 */
174int stedma40_set_psize(struct dma_chan *chan,
175 int src_psize,
176 int dst_psize);
177
178/**
179 * stedma40_filter() - Provides stedma40_chan_cfg to the
180 * ste_dma40 dma driver via the dmaengine framework.
181 * It also does some basic checking of what is provided.
182 *
183 * Never called directly by the client; it is used by the dmaengine framework.
184 * @chan: dmaengine handle.
185 * @data: Must be of type: struct stedma40_chan_cfg and is
186 * the configuration of the framework.
187 *
188 *
189 */
190
191bool stedma40_filter(struct dma_chan *chan, void *data);
192
193/**
194 * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
195 * scatter-gather lists.
196 *
197 * @chan: dmaengine handle
198 * @sgl_dst: Destination scatter list
199 * @sgl_src: Source scatter list
200 * @sgl_len: The length of each scatterlist. Both lists must be of equal length
201 * and each element must match the corresponding element in the other scatter
202 * list.
203 * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
204 */
205
206struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
207 struct scatterlist *sgl_dst,
208 struct scatterlist *sgl_src,
209 unsigned int sgl_len,
210 unsigned long flags);
211
212/**
213 * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
214 * (=device)
215 *
216 * @chan: dmaengine handle
217 * @addr: source or destination physical address.
218 * @size: bytes to transfer
219 * @direction: direction of transfer
220 * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
221 */
222
223static inline struct
224dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
225 dma_addr_t addr,
226 unsigned int size,
227 enum dma_data_direction direction,
228 unsigned long flags)
229{
230 struct scatterlist sg;
231 sg_init_table(&sg, 1);
232 sg.dma_address = addr;
233 sg.length = size;
234
235 return chan->device->device_prep_slave_sg(chan, &sg, 1,
236 direction, flags);
237}
238
239#endif
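For context (not part of the commit): below is a minimal sketch of how a client driver might use the API declared above to request a logical DMA40 channel and start one slave transfer. The event line MY_UART_RX_DEV_TYPE, the callback, and the chosen channel_type/psize values are illustrative assumptions; the framework calls (dma_request_channel(), tx_submit(), dma_async_issue_pending()) are the standard dmaengine entry points of that era.

/*
 * Hedged example only, not part of the patch.
 * MY_UART_RX_DEV_TYPE is a hypothetical event line number that would
 * come from the platform's dev_rx table.
 */
#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

static struct stedma40_chan_cfg uart_rx_cfg = {
	.dir		= STEDMA40_PERIPH_TO_MEM,
	.channel_type	= STEDMA40_CHANNEL_IN_LOG_MODE |
			  STEDMA40_LOW_PRIORITY_CHANNEL,
	.src_dev_type	= MY_UART_RX_DEV_TYPE,	/* hypothetical */
	.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
	.src_info = {
		.endianess	= STEDMA40_LITTLE_ENDIAN,
		.data_width	= STEDMA40_BYTE_WIDTH,
		.psize		= STEDMA40_PSIZE_LOG_1,
		.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
	},
	.dst_info = {
		.endianess	= STEDMA40_LITTLE_ENDIAN,
		.data_width	= STEDMA40_BYTE_WIDTH,
		.psize		= STEDMA40_PSIZE_LOG_1,
		.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
	},
};

static void uart_rx_done(void *arg)
{
	/* Called from the driver's tasklet when the transfer completes. */
}

static int uart_start_rx(dma_addr_t buf, unsigned int len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() validates uart_rx_cfg and binds it to the channel */
	chan = dma_request_channel(mask, stedma40_filter, &uart_rx_cfg);
	if (!chan)
		return -ENODEV;

	desc = stedma40_slave_mem(chan, buf, len, DMA_FROM_DEVICE,
				  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = uart_rx_done;
	desc->callback_param = NULL;

	cookie = desc->tx_submit(desc);	/* queue the job on the channel */
	if (cookie < 0)
		return cookie;

	dma_async_issue_pending(chan);	/* kick the engine */
	return 0;
}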
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a2fcb2ead892..1b8877922fb0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -141,6 +141,13 @@ config COH901318
141 141	  help
142 142	    Enable support for ST-Ericsson COH 901 318 DMA.
143 143
144config STE_DMA40
145 bool "ST-Ericsson DMA40 support"
146 depends on ARCH_U8500
147 select DMA_ENGINE
148 help
149 Support for ST-Ericsson DMA40 controller
150
144 151	config AMCC_PPC440SPE_ADMA
145 152		tristate "AMCC PPC440SPe ADMA support"
146 153		depends on 440SPe || 440SP
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 40c627d8f73b..20881426c1ac 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
21 21	obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
22 22	obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
23 23	obj-$(CONFIG_TIMB_DMA) += timb_dma.o
24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
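For context (not part of the commit): with CONFIG_STE_DMA40=y the driver expects the machine code to hand it a struct stedma40_platform_data as declared in the new header. A rough sketch of what a board file could provide is below; every table size, event line number and default configuration value is a placeholder, and the device name is only assumed to match the driver's D40_NAME, so this is not real U8500 data.

/* Hedged sketch only: placeholder tables, not real U8500 mappings. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <plat/ste_dma40.h>

/* Mapping from event line to device FIFO address; left zeroed here. */
static dma_addr_t dev_tx[64];	/* dst event line -> device TX register */
static dma_addr_t dev_rx[64];	/* src event line -> device RX register */

/* Event lines reserved for memcpy (placeholder numbers). */
static int memcpy_ev[] = { 56, 57, 58, 59, 60 };

static struct stedma40_chan_cfg memcpy_conf_log = {
	.dir		= STEDMA40_MEM_TO_MEM,
	.channel_type	= STEDMA40_CHANNEL_IN_LOG_MODE,
	.src_info = { .data_width = STEDMA40_WORD_WIDTH,
		      .psize = STEDMA40_PSIZE_LOG_4 },
	.dst_info = { .data_width = STEDMA40_WORD_WIDTH,
		      .psize = STEDMA40_PSIZE_LOG_4 },
};

static struct stedma40_chan_cfg memcpy_conf_phy = {
	.dir		= STEDMA40_MEM_TO_MEM,
	.channel_type	= STEDMA40_CHANNEL_IN_PHY_MODE,
	.src_info = { .data_width = STEDMA40_WORD_WIDTH,
		      .psize = STEDMA40_PSIZE_PHY_4 },
	.dst_info = { .data_width = STEDMA40_WORD_WIDTH,
		      .psize = STEDMA40_PSIZE_PHY_4 },
};

static struct stedma40_platform_data dma40_plat_data = {
	.dev_len	 = ARRAY_SIZE(dev_tx),
	.dev_tx		 = dev_tx,
	.dev_rx		 = dev_rx,
	.memcpy		 = memcpy_ev,
	.memcpy_len	 = ARRAY_SIZE(memcpy_ev),
	.memcpy_conf_phy = &memcpy_conf_phy,
	.memcpy_conf_log = &memcpy_conf_log,
	.llis_per_log	 = 8,
};

static struct platform_device dma40_device = {
	.name	= "dma40",	/* assumed to match the driver's D40_NAME */
	.id	= 0,
	.dev	= {
		.platform_data = &dma40_plat_data,
	},
	/*
	 * The register window, LCPA/LCLA memory areas and the IRQ would be
	 * added as struct resource entries here; omitted in this sketch.
	 */
};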
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 000000000000..e4295a27672b
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2596 @@
1/*
2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
37/* The number of free d40_desc to keep in memory before starting
38 * to kfree() them */
39#define D40_DESC_CACHE_SIZE 50
40
41/* Hardware designer of the block */
42#define D40_PERIPHID2_DESIGNER 0x8
43
44/**
45 * enum d40_command - The different commands and/or statuses.
46 *
47 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
48 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
49 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
50 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
51 */
52enum d40_command {
53 D40_DMA_STOP = 0,
54 D40_DMA_RUN = 1,
55 D40_DMA_SUSPEND_REQ = 2,
56 D40_DMA_SUSPENDED = 3
57};
58
59/**
60 * struct d40_lli_pool - Structure for keeping LLIs in memory
61 *
62 * @base: Pointer to memory area when the pre_alloc_lli's are not large
63 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
64 * pre_alloc_lli is used.
65 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
66 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
67 * one buffer to one buffer.
68 */
69struct d40_lli_pool {
70 void *base;
71 int size;
72 /* Space for dst and src, plus an extra for padding */
73 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
74};
75
76/**
77 * struct d40_desc - A descriptor is one DMA job.
78 *
79 * @lli_phy: LLI settings for physical channel. Both src and dst
80 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
81 * lli_len equals one.
82 * @lli_log: Same as above but for logical channels.
83 * @lli_pool: The pool with two entries pre-allocated.
84 * @lli_len: Number of LLI's in lli_pool
85 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
86 * lli_len, this transfer job is done.
87 * @txd: DMA engine struct. Used among other things for communication
88 * during a transfer.
89 * @node: List entry.
90 * @dir: The transfer direction of this job.
91 * @is_in_client_list: true if the client owns this descriptor.
92 *
93 * This descriptor is used for both logical and physical transfers.
94 */
95
96struct d40_desc {
97 /* LLI physical */
98 struct d40_phy_lli_bidir lli_phy;
99 /* LLI logical */
100 struct d40_log_lli_bidir lli_log;
101
102 struct d40_lli_pool lli_pool;
103 u32 lli_len;
104 u32 lli_tcount;
105
106 struct dma_async_tx_descriptor txd;
107 struct list_head node;
108
109 enum dma_data_direction dir;
110 bool is_in_client_list;
111};
112
113/**
114 * struct d40_lcla_pool - LCLA pool settings and data.
115 *
116 * @base: The virtual address of LCLA.
117 * @phy: Physical base address of LCLA.
118 * @base_size: size of lcla.
119 * @lock: Lock to protect the content in this struct.
120 * @alloc_map: Mapping between physical channel and LCLA entries.
121 * @num_blocks: The number of entries of alloc_map. Equal to the
122 * number of physical channels.
123 */
124struct d40_lcla_pool {
125 void *base;
126 dma_addr_t phy;
127 resource_size_t base_size;
128 spinlock_t lock;
129 u32 *alloc_map;
130 int num_blocks;
131};
132
133/**
134 * struct d40_phy_res - struct for handling eventlines mapped to physical
135 * channels.
136 *
137 * @lock: A lock protecting this entity.
138 * @num: The physical channel number of this entity.
139 * @allocated_src: Bit mapped to show which src event lines are mapped to
140 * this physical channel. Can also be free or physically allocated.
141 * @allocated_dst: Same as for src but is dst.
142 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
143 * the event line number. allocated_src and allocated_dst cannot both be
144 * allocated to a physical channel, since the interrupt handler would then
145 * have no way of figuring out which one the interrupt belongs to.
146 */
147struct d40_phy_res {
148 spinlock_t lock;
149 int num;
150 u32 allocated_src;
151 u32 allocated_dst;
152};
153
154struct d40_base;
155
156/**
157 * struct d40_chan - Struct that describes a channel.
158 *
159 * @lock: A spinlock to protect this struct.
160 * @log_num: The logical number, if any, of this channel.
161 * @completed: Starts with 1, after first interrupt it is set to dma engine's
162 * current cookie.
163 * @pending_tx: The number of pending transfers. Used between interrupt handler
164 * and tasklet.
165 * @busy: Set to true when transfer is ongoing on this channel.
166 * @phy_chan: Pointer to physical channel which this instance runs on.
167 * @chan: DMA engine handle.
168 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
169 * transfer and call client callback.
170 * @client: Client owned descriptor list.
171 * @active: Active descriptor.
172 * @queue: Queued jobs.
173 * @free: List of free descriptors, ready to be reused.
174 * @free_len: Number of descriptors in the free list.
175 * @dma_cfg: The client configuration of this dma channel.
176 * @base: Pointer to the device instance struct.
177 * @src_def_cfg: Default cfg register setting for src.
178 * @dst_def_cfg: Default cfg register setting for dst.
179 * @log_def: Default logical channel settings.
180 * @lcla: Space for one dst src pair for logical channel transfers.
181 * @lcpa: Pointer to dst and src lcpa settings.
182 *
183 * This struct can either "be" a logical or a physical channel.
184 */
185struct d40_chan {
186 spinlock_t lock;
187 int log_num;
188 /* ID of the most recent completed transfer */
189 int completed;
190 int pending_tx;
191 bool busy;
192 struct d40_phy_res *phy_chan;
193 struct dma_chan chan;
194 struct tasklet_struct tasklet;
195 struct list_head client;
196 struct list_head active;
197 struct list_head queue;
198 struct list_head free;
199 int free_len;
200 struct stedma40_chan_cfg dma_cfg;
201 struct d40_base *base;
202 /* Default register configurations */
203 u32 src_def_cfg;
204 u32 dst_def_cfg;
205 struct d40_def_lcsp log_def;
206 struct d40_lcla_elem lcla;
207 struct d40_log_lli_full *lcpa;
208};
209
210/**
211 * struct d40_base - The big global struct, one for each probe'd instance.
212 *
213 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
214 * @execmd_lock: Lock for execute command usage since several channels share
215 * the same physical register.
216 * @dev: The device structure.
217 * @virtbase: The virtual base address of the DMA's registers.
218 * @clk: Pointer to the DMA clock structure.
219 * @phy_start: Physical memory start of the DMA registers.
220 * @phy_size: Size of the DMA register map.
221 * @irq: The IRQ number.
222 * @num_phy_chans: The number of physical channels. Read from HW. This
223 * is the number of available channels for this driver, not counting "Secure
224 * mode" allocated physical channels.
225 * @num_log_chans: The number of logical channels. Calculated from
226 * num_phy_chans.
227 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
228 * @dma_slave: dma_device channels that can only do slave transfers.
229 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
230 * @phy_chans: Room for all possible physical channels in system.
231 * @log_chans: Room for all possible logical channels in system.
232 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
233 * to log_chans entries.
234 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
235 * to phy_chans entries.
236 * @plat_data: Pointer to provided platform_data which is the driver
237 * configuration.
238 * @phy_res: Vector containing all physical channels.
239 * @lcla_pool: lcla pool settings and data.
240 * @lcpa_base: The virtual mapped address of LCPA.
241 * @phy_lcpa: The physical address of the LCPA.
242 * @lcpa_size: The size of the LCPA area.
243 */
244struct d40_base {
245 spinlock_t interrupt_lock;
246 spinlock_t execmd_lock;
247 struct device *dev;
248 void __iomem *virtbase;
249 struct clk *clk;
250 phys_addr_t phy_start;
251 resource_size_t phy_size;
252 int irq;
253 int num_phy_chans;
254 int num_log_chans;
255 struct dma_device dma_both;
256 struct dma_device dma_slave;
257 struct dma_device dma_memcpy;
258 struct d40_chan *phy_chans;
259 struct d40_chan *log_chans;
260 struct d40_chan **lookup_log_chans;
261 struct d40_chan **lookup_phy_chans;
262 struct stedma40_platform_data *plat_data;
263 /* Physical half channels */
264 struct d40_phy_res *phy_res;
265 struct d40_lcla_pool lcla_pool;
266 void *lcpa_base;
267 dma_addr_t phy_lcpa;
268 resource_size_t lcpa_size;
269};
270
271/**
272 * struct d40_interrupt_lookup - lookup table for interrupt handler
273 *
274 * @src: Interrupt mask register.
275 * @clr: Interrupt clear register.
276 * @is_error: true if this is an error interrupt.
277 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
278 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
279 */
280struct d40_interrupt_lookup {
281 u32 src;
282 u32 clr;
283 bool is_error;
284 int offset;
285};
286
287/**
288 * struct d40_reg_val - simple lookup struct
289 *
290 * @reg: The register.
291 * @val: The value that belongs to the register in reg.
292 */
293struct d40_reg_val {
294 unsigned int reg;
295 unsigned int val;
296};
297
298static int d40_pool_lli_alloc(struct d40_desc *d40d,
299 int lli_len, bool is_log)
300{
301 u32 align;
302 void *base;
303
304 if (is_log)
305 align = sizeof(struct d40_log_lli);
306 else
307 align = sizeof(struct d40_phy_lli);
308
309 if (lli_len == 1) {
310 base = d40d->lli_pool.pre_alloc_lli;
311 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
312 d40d->lli_pool.base = NULL;
313 } else {
314 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
315
316 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
317 d40d->lli_pool.base = base;
318
319 if (d40d->lli_pool.base == NULL)
320 return -ENOMEM;
321 }
322
323 if (is_log) {
324 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
325 align);
326 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
327 align);
328 } else {
329 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
330 align);
331 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
332 align);
333
334 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
335 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
336 }
337
338 return 0;
339}
340
341static void d40_pool_lli_free(struct d40_desc *d40d)
342{
343 kfree(d40d->lli_pool.base);
344 d40d->lli_pool.base = NULL;
345 d40d->lli_pool.size = 0;
346 d40d->lli_log.src = NULL;
347 d40d->lli_log.dst = NULL;
348 d40d->lli_phy.src = NULL;
349 d40d->lli_phy.dst = NULL;
350 d40d->lli_phy.src_addr = 0;
351 d40d->lli_phy.dst_addr = 0;
352}
353
354static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
355 struct d40_desc *desc)
356{
357 dma_cookie_t cookie = d40c->chan.cookie;
358
359 if (++cookie < 0)
360 cookie = 1;
361
362 d40c->chan.cookie = cookie;
363 desc->txd.cookie = cookie;
364
365 return cookie;
366}
367
368static void d40_desc_reset(struct d40_desc *d40d)
369{
370 d40d->lli_tcount = 0;
371}
372
373static void d40_desc_remove(struct d40_desc *d40d)
374{
375 list_del(&d40d->node);
376}
377
378static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
379{
380 struct d40_desc *desc;
381 struct d40_desc *d;
382 struct d40_desc *_d;
383
384 if (!list_empty(&d40c->client)) {
385 list_for_each_entry_safe(d, _d, &d40c->client, node)
386 if (async_tx_test_ack(&d->txd)) {
387 d40_pool_lli_free(d);
388 d40_desc_remove(d);
389 desc = d;
390 goto out;
391 }
392 }
393
394 if (list_empty(&d40c->free)) {
395 /* Allocate a new desc because the free list is empty */
396 desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
397 if (desc == NULL)
398 goto out;
399 INIT_LIST_HEAD(&desc->node);
400 } else {
401 /* Reuse an old desc. */
402 desc = list_first_entry(&d40c->free,
403 struct d40_desc,
404 node);
405 list_del(&desc->node);
406 d40c->free_len--;
407 }
408out:
409 return desc;
410}
411
412static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
413{
414 if (d40c->free_len < D40_DESC_CACHE_SIZE) {
415 list_add_tail(&d40d->node, &d40c->free);
416 d40c->free_len++;
417 } else
418 kfree(d40d);
419}
420
421static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
422{
423 list_add_tail(&desc->node, &d40c->active);
424}
425
426static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
427{
428 struct d40_desc *d;
429
430 if (list_empty(&d40c->active))
431 return NULL;
432
433 d = list_first_entry(&d40c->active,
434 struct d40_desc,
435 node);
436 return d;
437}
438
439static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
440{
441 list_add_tail(&desc->node, &d40c->queue);
442}
443
444static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
445{
446 struct d40_desc *d;
447
448 if (list_empty(&d40c->queue))
449 return NULL;
450
451 d = list_first_entry(&d40c->queue,
452 struct d40_desc,
453 node);
454 return d;
455}
456
457/* Support functions for logical channels */
458
459static int d40_lcla_id_get(struct d40_chan *d40c,
460 struct d40_lcla_pool *pool)
461{
462 int src_id = 0;
463 int dst_id = 0;
464 struct d40_log_lli *lcla_lidx_base =
465 pool->base + d40c->phy_chan->num * 1024;
466 int i;
467 int lli_per_log = d40c->base->plat_data->llis_per_log;
468
469 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
470 return 0;
471
472 if (pool->num_blocks > 32)
473 return -EINVAL;
474
475 spin_lock(&pool->lock);
476
477 for (i = 0; i < pool->num_blocks; i++) {
478 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
479 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
480 break;
481 }
482 }
483 src_id = i;
484 if (src_id >= pool->num_blocks)
485 goto err;
486
487 for (; i < pool->num_blocks; i++) {
488 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
489 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
490 break;
491 }
492 }
493
494 dst_id = i;
495 if (dst_id == src_id)
496 goto err;
497
498 d40c->lcla.src_id = src_id;
499 d40c->lcla.dst_id = dst_id;
500 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
501 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
502
503
504 spin_unlock(&pool->lock);
505 return 0;
506err:
507 spin_unlock(&pool->lock);
508 return -EINVAL;
509}
510
511static void d40_lcla_id_put(struct d40_chan *d40c,
512 struct d40_lcla_pool *pool,
513 int id)
514{
515 if (id < 0)
516 return;
517
518 d40c->lcla.src_id = -1;
519 d40c->lcla.dst_id = -1;
520
521 spin_lock(&pool->lock);
522 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
523 spin_unlock(&pool->lock);
524}
525
526static int d40_channel_execute_command(struct d40_chan *d40c,
527 enum d40_command command)
528{
529 int status, i;
530 void __iomem *active_reg;
531 int ret = 0;
532 unsigned long flags;
533
534 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
535
536 if (d40c->phy_chan->num % 2 == 0)
537 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
538 else
539 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
540
541 if (command == D40_DMA_SUSPEND_REQ) {
542 status = (readl(active_reg) &
543 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
544 D40_CHAN_POS(d40c->phy_chan->num);
545
546 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
547 goto done;
548 }
549
550 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
551
552 if (command == D40_DMA_SUSPEND_REQ) {
553
554 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
555 status = (readl(active_reg) &
556 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
557 D40_CHAN_POS(d40c->phy_chan->num);
558
559 cpu_relax();
560 /*
561 * Reduce the number of bus accesses while
562 * waiting for the DMA to suspend.
563 */
564 udelay(3);
565
566 if (status == D40_DMA_STOP ||
567 status == D40_DMA_SUSPENDED)
568 break;
569 }
570
571 if (i == D40_SUSPEND_MAX_IT) {
572 dev_err(&d40c->chan.dev->device,
573 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
574 __func__, d40c->phy_chan->num, d40c->log_num,
575 status);
576 dump_stack();
577 ret = -EBUSY;
578 }
579
580 }
581done:
582 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
583 return ret;
584}
585
586static void d40_term_all(struct d40_chan *d40c)
587{
588 struct d40_desc *d40d;
589 struct d40_desc *d;
590 struct d40_desc *_d;
591
592 /* Release active descriptors */
593 while ((d40d = d40_first_active_get(d40c))) {
594 d40_desc_remove(d40d);
595
596 /* Return desc to free-list */
597 d40_desc_free(d40c, d40d);
598 }
599
600 /* Release queued descriptors waiting for transfer */
601 while ((d40d = d40_first_queued(d40c))) {
602 d40_desc_remove(d40d);
603
604 /* Return desc to free-list */
605 d40_desc_free(d40c, d40d);
606 }
607
608 /* Release client owned descriptors */
609 if (!list_empty(&d40c->client))
610 list_for_each_entry_safe(d, _d, &d40c->client, node) {
611 d40_pool_lli_free(d);
612 d40_desc_remove(d);
613 /* Return desc to free-list */
614 d40_desc_free(d40c, d);
615 }
616
617 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
618 d40c->lcla.src_id);
619 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
620 d40c->lcla.dst_id);
621
622 d40c->pending_tx = 0;
623 d40c->busy = false;
624}
625
626static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
627{
628 u32 val;
629 unsigned long flags;
630
631 if (do_enable)
632 val = D40_ACTIVATE_EVENTLINE;
633 else
634 val = D40_DEACTIVATE_EVENTLINE;
635
636 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
637
638 /* Enable event line connected to device (or memcpy) */
639 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
640 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
641 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
642
643 writel((val << D40_EVENTLINE_POS(event)) |
644 ~D40_EVENTLINE_MASK(event),
645 d40c->base->virtbase + D40_DREG_PCBASE +
646 d40c->phy_chan->num * D40_DREG_PCDELTA +
647 D40_CHAN_REG_SSLNK);
648 }
649 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
650 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
651
652 writel((val << D40_EVENTLINE_POS(event)) |
653 ~D40_EVENTLINE_MASK(event),
654 d40c->base->virtbase + D40_DREG_PCBASE +
655 d40c->phy_chan->num * D40_DREG_PCDELTA +
656 D40_CHAN_REG_SDLNK);
657 }
658
659 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
660}
661
662static bool d40_chan_has_events(struct d40_chan *d40c)
663{
664 u32 val = 0;
665
666 /* If SSLNK or SDLNK is zero all events are disabled */
667 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
668 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
669 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
670 d40c->phy_chan->num * D40_DREG_PCDELTA +
671 D40_CHAN_REG_SSLNK);
672
673 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
674 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
675 d40c->phy_chan->num * D40_DREG_PCDELTA +
676 D40_CHAN_REG_SDLNK);
677 return (bool) val;
678}
679
680static void d40_config_enable_lidx(struct d40_chan *d40c)
681{
682 /* Set LIDX for lcla */
683 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
684 D40_SREG_ELEM_LOG_LIDX_MASK,
685 d40c->base->virtbase + D40_DREG_PCBASE +
686 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
687
688 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
689 D40_SREG_ELEM_LOG_LIDX_MASK,
690 d40c->base->virtbase + D40_DREG_PCBASE +
691 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
692}
693
694static int d40_config_write(struct d40_chan *d40c)
695{
696 u32 addr_base;
697 u32 var;
698 int res;
699
700 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
701 if (res)
702 return res;
703
704 /* Odd addresses are even addresses + 4 */
705 addr_base = (d40c->phy_chan->num % 2) * 4;
706 /* Setup channel mode to logical or physical */
707 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
708 D40_CHAN_POS(d40c->phy_chan->num);
709 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
710
711 /* Setup operational mode option register */
712 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
713 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
714
715 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
716
717 if (d40c->log_num != D40_PHY_CHAN) {
718 /* Set default config for CFG reg */
719 writel(d40c->src_def_cfg,
720 d40c->base->virtbase + D40_DREG_PCBASE +
721 d40c->phy_chan->num * D40_DREG_PCDELTA +
722 D40_CHAN_REG_SSCFG);
723 writel(d40c->dst_def_cfg,
724 d40c->base->virtbase + D40_DREG_PCBASE +
725 d40c->phy_chan->num * D40_DREG_PCDELTA +
726 D40_CHAN_REG_SDCFG);
727
728 d40_config_enable_lidx(d40c);
729 }
730 return res;
731}
732
733static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
734{
735
736 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
737 d40_phy_lli_write(d40c->base->virtbase,
738 d40c->phy_chan->num,
739 d40d->lli_phy.dst,
740 d40d->lli_phy.src);
741 d40d->lli_tcount = d40d->lli_len;
742 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
743 u32 lli_len;
744 struct d40_log_lli *src = d40d->lli_log.src;
745 struct d40_log_lli *dst = d40d->lli_log.dst;
746
747 src += d40d->lli_tcount;
748 dst += d40d->lli_tcount;
749
750 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
751 lli_len = d40d->lli_len;
752 else
753 lli_len = d40c->base->plat_data->llis_per_log;
754 d40d->lli_tcount += lli_len;
755 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
756 d40c->lcla.dst,
757 dst, src,
758 d40c->base->plat_data->llis_per_log);
759 }
760}
761
762static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
763{
764 struct d40_chan *d40c = container_of(tx->chan,
765 struct d40_chan,
766 chan);
767 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
768 unsigned long flags;
769
770 spin_lock_irqsave(&d40c->lock, flags);
771
772 tx->cookie = d40_assign_cookie(d40c, d40d);
773
774 d40_desc_queue(d40c, d40d);
775
776 spin_unlock_irqrestore(&d40c->lock, flags);
777
778 return tx->cookie;
779}
780
781static int d40_start(struct d40_chan *d40c)
782{
783 int err;
784
785 if (d40c->log_num != D40_PHY_CHAN) {
786 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
787 if (err)
788 return err;
789 d40_config_set_event(d40c, true);
790 }
791
792 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
793
794 return err;
795}
796
797static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
798{
799 struct d40_desc *d40d;
800 int err;
801
802 /* Start queued jobs, if any */
803 d40d = d40_first_queued(d40c);
804
805 if (d40d != NULL) {
806 d40c->busy = true;
807
808 /* Remove from queue */
809 d40_desc_remove(d40d);
810
811 /* Add to active queue */
812 d40_desc_submit(d40c, d40d);
813
814 /* Initiate DMA job */
815 d40_desc_load(d40c, d40d);
816
817 /* Start dma job */
818 err = d40_start(d40c);
819
820 if (err)
821 return NULL;
822 }
823
824 return d40d;
825}
826
827/* called from interrupt context */
828static void dma_tc_handle(struct d40_chan *d40c)
829{
830 struct d40_desc *d40d;
831
832 if (!d40c->phy_chan)
833 return;
834
835 /* Get first active entry from list */
836 d40d = d40_first_active_get(d40c);
837
838 if (d40d == NULL)
839 return;
840
841 if (d40d->lli_tcount < d40d->lli_len) {
842
843 d40_desc_load(d40c, d40d);
844 /* Start dma job */
845 (void) d40_start(d40c);
846 return;
847 }
848
849 if (d40_queue_start(d40c) == NULL)
850 d40c->busy = false;
851
852 d40c->pending_tx++;
853 tasklet_schedule(&d40c->tasklet);
854
855}
856
857static void dma_tasklet(unsigned long data)
858{
859 struct d40_chan *d40c = (struct d40_chan *) data;
860 struct d40_desc *d40d_fin;
861 unsigned long flags;
862 dma_async_tx_callback callback;
863 void *callback_param;
864
865 spin_lock_irqsave(&d40c->lock, flags);
866
867 /* Get first active entry from list */
868 d40d_fin = d40_first_active_get(d40c);
869
870 if (d40d_fin == NULL)
871 goto err;
872
873 d40c->completed = d40d_fin->txd.cookie;
874
875 /*
876 * If terminating a channel pending_tx is set to zero.
877 * This prevents any finished active jobs from being returned to the client.
878 */
879 if (d40c->pending_tx == 0) {
880 spin_unlock_irqrestore(&d40c->lock, flags);
881 return;
882 }
883
884 /* Callback to client */
885 callback = d40d_fin->txd.callback;
886 callback_param = d40d_fin->txd.callback_param;
887
888 if (async_tx_test_ack(&d40d_fin->txd)) {
889 d40_pool_lli_free(d40d_fin);
890 d40_desc_remove(d40d_fin);
891 /* Return desc to free-list */
892 d40_desc_free(d40c, d40d_fin);
893 } else {
894 d40_desc_reset(d40d_fin);
895 if (!d40d_fin->is_in_client_list) {
896 d40_desc_remove(d40d_fin);
897 list_add_tail(&d40d_fin->node, &d40c->client);
898 d40d_fin->is_in_client_list = true;
899 }
900 }
901
902 d40c->pending_tx--;
903
904 if (d40c->pending_tx)
905 tasklet_schedule(&d40c->tasklet);
906
907 spin_unlock_irqrestore(&d40c->lock, flags);
908
909 if (callback)
910 callback(callback_param);
911
912 return;
913
914 err:
915 /* Rescue maneuver if receiving double interrupts */
916 if (d40c->pending_tx > 0)
917 d40c->pending_tx--;
918 spin_unlock_irqrestore(&d40c->lock, flags);
919}
920
921static irqreturn_t d40_handle_interrupt(int irq, void *data)
922{
923 static const struct d40_interrupt_lookup il[] = {
924 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
925 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
926 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
927 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
928 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
929 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
930 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
931 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
932 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
933 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
934 };
935
936 int i;
937 u32 regs[ARRAY_SIZE(il)];
938 u32 tmp;
939 u32 idx;
940 u32 row;
941 long chan = -1;
942 struct d40_chan *d40c;
943 unsigned long flags;
944 struct d40_base *base = data;
945
946 spin_lock_irqsave(&base->interrupt_lock, flags);
947
948 /* Read interrupt status of both logical and physical channels */
949 for (i = 0; i < ARRAY_SIZE(il); i++)
950 regs[i] = readl(base->virtbase + il[i].src);
951
952 for (;;) {
953
954 chan = find_next_bit((unsigned long *)regs,
955 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
956
957 /* No more set bits found? */
958 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
959 break;
960
961 row = chan / BITS_PER_LONG;
962 idx = chan & (BITS_PER_LONG - 1);
963
964 /* ACK interrupt */
965 tmp = readl(base->virtbase + il[row].clr);
966 tmp |= 1 << idx;
967 writel(tmp, base->virtbase + il[row].clr);
968
969 if (il[row].offset == D40_PHY_CHAN)
970 d40c = base->lookup_phy_chans[idx];
971 else
972 d40c = base->lookup_log_chans[il[row].offset + idx];
973 spin_lock(&d40c->lock);
974
975 if (!il[row].is_error)
976 dma_tc_handle(d40c);
977 else
978 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
979 __func__, chan, il[row].offset, idx);
980
981 spin_unlock(&d40c->lock);
982 }
983
984 spin_unlock_irqrestore(&base->interrupt_lock, flags);
985
986 return IRQ_HANDLED;
987}
988
989
990static int d40_validate_conf(struct d40_chan *d40c,
991 struct stedma40_chan_cfg *conf)
992{
993 int res = 0;
994 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
995 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
996 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
997 == STEDMA40_CHANNEL_IN_LOG_MODE;
998
999 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1000 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1001 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
1002 __func__);
1003 res = -EINVAL;
1004 }
1005
1006 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1007 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1008 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
1009 __func__);
1010 res = -EINVAL;
1011 }
1012
1013 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1014 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1015 dev_err(&d40c->chan.dev->device,
1016 "[%s] No event line\n", __func__);
1017 res = -EINVAL;
1018 }
1019
1020 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1021 (src_event_group != dst_event_group)) {
1022 dev_err(&d40c->chan.dev->device,
1023 "[%s] Invalid event group\n", __func__);
1024 res = -EINVAL;
1025 }
1026
1027 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1028 /*
1029 * The DMAC HW supports it, but support will only be added to this
1030 * driver if some dma client requires it.
1031 */
1032 dev_err(&d40c->chan.dev->device,
1033 "[%s] periph to periph not supported\n",
1034 __func__);
1035 res = -EINVAL;
1036 }
1037
1038 return res;
1039}
1040
1041static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1042 int log_event_line)
1043{
1044 unsigned long flags;
1045 spin_lock_irqsave(&phy->lock, flags);
1046 if (!log_event_line) {
1047 /* Physical interrupts are masked per physical full channel */
1048 if (phy->allocated_src == D40_ALLOC_FREE &&
1049 phy->allocated_dst == D40_ALLOC_FREE) {
1050 phy->allocated_dst = D40_ALLOC_PHY;
1051 phy->allocated_src = D40_ALLOC_PHY;
1052 goto found;
1053 } else
1054 goto not_found;
1055 }
1056
1057 /* Logical channel */
1058 if (is_src) {
1059 if (phy->allocated_src == D40_ALLOC_PHY)
1060 goto not_found;
1061
1062 if (phy->allocated_src == D40_ALLOC_FREE)
1063 phy->allocated_src = D40_ALLOC_LOG_FREE;
1064
1065 if (!(phy->allocated_src & (1 << log_event_line))) {
1066 phy->allocated_src |= 1 << log_event_line;
1067 goto found;
1068 } else
1069 goto not_found;
1070 } else {
1071 if (phy->allocated_dst == D40_ALLOC_PHY)
1072 goto not_found;
1073
1074 if (phy->allocated_dst == D40_ALLOC_FREE)
1075 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1076
1077 if (!(phy->allocated_dst & (1 << log_event_line))) {
1078 phy->allocated_dst |= 1 << log_event_line;
1079 goto found;
1080 } else
1081 goto not_found;
1082 }
1083
1084not_found:
1085 spin_unlock_irqrestore(&phy->lock, flags);
1086 return false;
1087found:
1088 spin_unlock_irqrestore(&phy->lock, flags);
1089 return true;
1090}
1091
1092static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1093 int log_event_line)
1094{
1095 unsigned long flags;
1096 bool is_free = false;
1097
1098 spin_lock_irqsave(&phy->lock, flags);
1099 if (!log_event_line) {
1100 /* Physical interrupts are masked per physical full channel */
1101 phy->allocated_dst = D40_ALLOC_FREE;
1102 phy->allocated_src = D40_ALLOC_FREE;
1103 is_free = true;
1104 goto out;
1105 }
1106
1107 /* Logical channel */
1108 if (is_src) {
1109 phy->allocated_src &= ~(1 << log_event_line);
1110 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1111 phy->allocated_src = D40_ALLOC_FREE;
1112 } else {
1113 phy->allocated_dst &= ~(1 << log_event_line);
1114 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1115 phy->allocated_dst = D40_ALLOC_FREE;
1116 }
1117
1118 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1119 D40_ALLOC_FREE);
1120
1121out:
1122 spin_unlock_irqrestore(&phy->lock, flags);
1123
1124 return is_free;
1125}
1126
1127static int d40_allocate_channel(struct d40_chan *d40c)
1128{
1129 int dev_type;
1130 int event_group;
1131 int event_line;
1132 struct d40_phy_res *phys;
1133 int i;
1134 int j;
1135 int log_num;
1136 bool is_src;
1137 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1138 == STEDMA40_CHANNEL_IN_LOG_MODE;
1139
1140
1141 phys = d40c->base->phy_res;
1142
1143 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1144 dev_type = d40c->dma_cfg.src_dev_type;
1145 log_num = 2 * dev_type;
1146 is_src = true;
1147 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1148 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1149 /* dst event lines are used for logical memcpy */
1150 dev_type = d40c->dma_cfg.dst_dev_type;
1151 log_num = 2 * dev_type + 1;
1152 is_src = false;
1153 } else
1154 return -EINVAL;
1155
1156 event_group = D40_TYPE_TO_GROUP(dev_type);
1157 event_line = D40_TYPE_TO_EVENT(dev_type);
1158
1159 if (!is_log) {
1160 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1161 /* Find physical half channel */
1162 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1163
1164 if (d40_alloc_mask_set(&phys[i], is_src, 0))
1165 goto found_phy;
1166 }
1167 } else
1168 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1169 int phy_num = j + event_group * 2;
1170 for (i = phy_num; i < phy_num + 2; i++) {
1171 if (d40_alloc_mask_set(&phys[i],
1172 is_src, 0))
1173 goto found_phy;
1174 }
1175 }
1176 return -EINVAL;
1177found_phy:
1178 d40c->phy_chan = &phys[i];
1179 d40c->log_num = D40_PHY_CHAN;
1180 goto out;
1181 }
1182 if (dev_type == -1)
1183 return -EINVAL;
1184
1185 /* Find logical channel */
1186 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1187 int phy_num = j + event_group * 2;
1188 /*
1189 * Spread logical channels across all available physical channels
1190 * rather than packing every logical channel onto the first available
1191 * phy channel.
1192 */
1193 if (is_src) {
1194 for (i = phy_num; i < phy_num + 2; i++) {
1195 if (d40_alloc_mask_set(&phys[i], is_src,
1196 event_line))
1197 goto found_log;
1198 }
1199 } else {
1200 for (i = phy_num + 1; i >= phy_num; i--) {
1201 if (d40_alloc_mask_set(&phys[i], is_src,
1202 event_line))
1203 goto found_log;
1204 }
1205 }
1206 }
1207 return -EINVAL;
1208
1209found_log:
1210 d40c->phy_chan = &phys[i];
1211 d40c->log_num = log_num;
1212out:
1213
1214 if (is_log)
1215 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1216 else
1217 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1218
1219 return 0;
1220
1221}
1222
1223static int d40_config_chan(struct d40_chan *d40c,
1224 struct stedma40_chan_cfg *info)
1225{
1226
1227 /* Fill in basic CFG register values */
1228 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1229 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1230
1231 if (d40c->log_num != D40_PHY_CHAN) {
1232 d40_log_cfg(&d40c->dma_cfg,
1233 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1234
1235 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1236 d40c->lcpa = d40c->base->lcpa_base +
1237 d40c->dma_cfg.src_dev_type * 32;
1238 else
1239 d40c->lcpa = d40c->base->lcpa_base +
1240 d40c->dma_cfg.dst_dev_type * 32 + 16;
1241 }
1242
1243 /* Write channel configuration to the DMA */
1244 return d40_config_write(d40c);
1245}
1246
1247static int d40_config_memcpy(struct d40_chan *d40c)
1248{
1249 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1250
1251 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1252 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1253 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1254 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1255 memcpy[d40c->chan.chan_id];
1256
1257 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1258 dma_has_cap(DMA_SLAVE, cap)) {
1259 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1260 } else {
1261 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1262 __func__);
1263 return -EINVAL;
1264 }
1265
1266 return 0;
1267}
1268
1269
1270static int d40_free_dma(struct d40_chan *d40c)
1271{
1272
1273 int res = 0;
1274 u32 event, dir;
1275 struct d40_phy_res *phy = d40c->phy_chan;
1276 bool is_src;
1277
1278 /* Terminate all queued and active transfers */
1279 d40_term_all(d40c);
1280
1281 if (phy == NULL) {
1282 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1283 __func__);
1284 return -EINVAL;
1285 }
1286
1287 if (phy->allocated_src == D40_ALLOC_FREE &&
1288 phy->allocated_dst == D40_ALLOC_FREE) {
1289 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1290 __func__);
1291 return -EINVAL;
1292 }
1293
1294
1295 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1296 if (res) {
1297 dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
1298 __func__);
1299 return res;
1300 }
1301
1302 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1303 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1304 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1305 dir = D40_CHAN_REG_SDLNK;
1306 is_src = false;
1307 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1308 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1309 dir = D40_CHAN_REG_SSLNK;
1310 is_src = true;
1311 } else {
1312 dev_err(&d40c->chan.dev->device,
1313 "[%s] Unknown direction\n", __func__);
1314 return -EINVAL;
1315 }
1316
1317 if (d40c->log_num != D40_PHY_CHAN) {
1318 /*
1319 * Release the logical channel; deactivate the event line while
1320 * the physical resource is suspended.
1321 */
1322 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1323 D40_EVENTLINE_MASK(event),
1324 d40c->base->virtbase + D40_DREG_PCBASE +
1325 phy->num * D40_DREG_PCDELTA + dir);
1326
1327 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1328
1329 /*
1330 * Check if there are more logical allocations
1331 * on this phy channel.
1332 */
1333 if (!d40_alloc_mask_free(phy, is_src, event)) {
1334 /* Resume the other logical channels if any */
1335 if (d40_chan_has_events(d40c)) {
1336 res = d40_channel_execute_command(d40c,
1337 D40_DMA_RUN);
1338 if (res) {
1339 dev_err(&d40c->chan.dev->device,
1340 "[%s] Executing RUN command\n",
1341 __func__);
1342 return res;
1343 }
1344 }
1345 return 0;
1346 }
1347 } else
1348 d40_alloc_mask_free(phy, is_src, 0);
1349
1350 /* Release physical channel */
1351 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1352 if (res) {
1353 dev_err(&d40c->chan.dev->device,
1354 "[%s] Failed to stop channel\n", __func__);
1355 return res;
1356 }
1357 d40c->phy_chan = NULL;
1358 /* Invalidate channel type */
1359 d40c->dma_cfg.channel_type = 0;
1360 d40c->base->lookup_phy_chans[phy->num] = NULL;
1361
1362 return 0;
1363
1364
1365}
1366
1367static int d40_pause(struct dma_chan *chan)
1368{
1369 struct d40_chan *d40c =
1370 container_of(chan, struct d40_chan, chan);
1371 int res;
1372
1373 unsigned long flags;
1374
1375 spin_lock_irqsave(&d40c->lock, flags);
1376
1377 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1378 if (res == 0) {
1379 if (d40c->log_num != D40_PHY_CHAN) {
1380 d40_config_set_event(d40c, false);
1381 /* Resume the other logical channels if any */
1382 if (d40_chan_has_events(d40c))
1383 res = d40_channel_execute_command(d40c,
1384 D40_DMA_RUN);
1385 }
1386 }
1387
1388 spin_unlock_irqrestore(&d40c->lock, flags);
1389 return res;
1390}
1391
1392static bool d40_tx_is_linked(struct d40_chan *d40c)
1393{
1394 bool is_link;
1395
1396 if (d40c->log_num != D40_PHY_CHAN)
1397 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1398 else
1399 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1400 d40c->phy_chan->num * D40_DREG_PCDELTA +
1401 D40_CHAN_REG_SDLNK) &
1402 D40_SREG_LNK_PHYS_LNK_MASK;
1403 return is_link;
1404}
1405
1406static u32 d40_residue(struct d40_chan *d40c)
1407{
1408 u32 num_elt;
1409
1410 if (d40c->log_num != D40_PHY_CHAN)
1411 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1412 >> D40_MEM_LCSP2_ECNT_POS;
1413 else
1414 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1415 d40c->phy_chan->num * D40_DREG_PCDELTA +
1416 D40_CHAN_REG_SDELT) &
1417 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1418 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1419}
1420
1421static int d40_resume(struct dma_chan *chan)
1422{
1423 struct d40_chan *d40c =
1424 container_of(chan, struct d40_chan, chan);
1425 int res = 0;
1426 unsigned long flags;
1427
1428 spin_lock_irqsave(&d40c->lock, flags);
1429
1430 if (d40c->log_num != D40_PHY_CHAN) {
1431 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1432 if (res)
1433 goto out;
1434
1435 /* If there are bytes left to transfer or a linked tx, resume the job */
1436 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1437 d40_config_set_event(d40c, true);
1438 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1439 }
1440 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1441 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1442
1443out:
1444 spin_unlock_irqrestore(&d40c->lock, flags);
1445 return res;
1446}
1447
1448static u32 stedma40_residue(struct dma_chan *chan)
1449{
1450 struct d40_chan *d40c =
1451 container_of(chan, struct d40_chan, chan);
1452 u32 bytes_left;
1453 unsigned long flags;
1454
1455 spin_lock_irqsave(&d40c->lock, flags);
1456 bytes_left = d40_residue(d40c);
1457 spin_unlock_irqrestore(&d40c->lock, flags);
1458
1459 return bytes_left;
1460}
1461
1462/* Public DMA functions in addition to the DMA engine framework */
1463
1464int stedma40_set_psize(struct dma_chan *chan,
1465 int src_psize,
1466 int dst_psize)
1467{
1468 struct d40_chan *d40c =
1469 container_of(chan, struct d40_chan, chan);
1470 unsigned long flags;
1471
1472 spin_lock_irqsave(&d40c->lock, flags);
1473
1474 if (d40c->log_num != D40_PHY_CHAN) {
1475 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1476 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1477 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1478 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1479 goto out;
1480 }
1481
1482 if (src_psize == STEDMA40_PSIZE_PHY_1)
1483 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1484 else {
1485 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1486 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1487 D40_SREG_CFG_PSIZE_POS);
1488 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1489 }
1490
1491 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1492 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1493 else {
1494 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1495 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1496 D40_SREG_CFG_PSIZE_POS);
1497 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1498 }
1499out:
1500 spin_unlock_irqrestore(&d40c->lock, flags);
1501 return 0;
1502}
1503EXPORT_SYMBOL(stedma40_set_psize);
1504
1505struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1506 struct scatterlist *sgl_dst,
1507 struct scatterlist *sgl_src,
1508 unsigned int sgl_len,
1509 unsigned long flags)
1510{
1511 int res;
1512 struct d40_desc *d40d;
1513 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1514 chan);
1515 unsigned long flg;
1516 int lli_max = d40c->base->plat_data->llis_per_log;
1517
1518
1519 spin_lock_irqsave(&d40c->lock, flg);
1520 d40d = d40_desc_get(d40c);
1521
1522 if (d40d == NULL)
1523 goto err;
1524
1525 memset(d40d, 0, sizeof(struct d40_desc));
1526 d40d->lli_len = sgl_len;
1527
1528 d40d->txd.flags = flags;
1529
1530 if (d40c->log_num != D40_PHY_CHAN) {
1531 if (sgl_len > 1)
1532 /*
1533 * Check if there is space available in lcla. If not,
1534 * split list into 1-length and run only in lcpa
1535 * space.
1536 */
1537 if (d40_lcla_id_get(d40c,
1538 &d40c->base->lcla_pool) != 0)
1539 lli_max = 1;
1540
1541 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1542 dev_err(&d40c->chan.dev->device,
1543 "[%s] Out of memory\n", __func__);
1544 goto err;
1545 }
1546
1547 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1548 sgl_src,
1549 sgl_len,
1550 d40d->lli_log.src,
1551 d40c->log_def.lcsp1,
1552 d40c->dma_cfg.src_info.data_width,
1553 flags & DMA_PREP_INTERRUPT, lli_max,
1554 d40c->base->plat_data->llis_per_log);
1555
1556 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1557 sgl_dst,
1558 sgl_len,
1559 d40d->lli_log.dst,
1560 d40c->log_def.lcsp3,
1561 d40c->dma_cfg.dst_info.data_width,
1562 flags & DMA_PREP_INTERRUPT, lli_max,
1563 d40c->base->plat_data->llis_per_log);
1564
1565
1566 } else {
1567 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1568 dev_err(&d40c->chan.dev->device,
1569 "[%s] Out of memory\n", __func__);
1570 goto err;
1571 }
1572
1573 res = d40_phy_sg_to_lli(sgl_src,
1574 sgl_len,
1575 0,
1576 d40d->lli_phy.src,
1577 d40d->lli_phy.src_addr,
1578 d40c->src_def_cfg,
1579 d40c->dma_cfg.src_info.data_width,
1580 d40c->dma_cfg.src_info.psize,
1581 true);
1582
1583 if (res < 0)
1584 goto err;
1585
1586 res = d40_phy_sg_to_lli(sgl_dst,
1587 sgl_len,
1588 0,
1589 d40d->lli_phy.dst,
1590 d40d->lli_phy.dst_addr,
1591 d40c->dst_def_cfg,
1592 d40c->dma_cfg.dst_info.data_width,
1593 d40c->dma_cfg.dst_info.psize,
1594 true);
1595
1596 if (res < 0)
1597 goto err;
1598
1599 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1600 d40d->lli_pool.size, DMA_TO_DEVICE);
1601 }
1602
1603 dma_async_tx_descriptor_init(&d40d->txd, chan);
1604
1605 d40d->txd.tx_submit = d40_tx_submit;
1606
1607 spin_unlock_irqrestore(&d40c->lock, flg);
1608
1609 return &d40d->txd;
1610err:
1611 spin_unlock_irqrestore(&d40c->lock, flg);
1612 return NULL;
1613}
1614EXPORT_SYMBOL(stedma40_memcpy_sg);
1615
1616bool stedma40_filter(struct dma_chan *chan, void *data)
1617{
1618 struct stedma40_chan_cfg *info = data;
1619 struct d40_chan *d40c =
1620 container_of(chan, struct d40_chan, chan);
1621 int err;
1622
1623 if (data) {
1624 err = d40_validate_conf(d40c, info);
1625 if (!err)
1626 d40c->dma_cfg = *info;
1627 } else
1628 err = d40_config_memcpy(d40c);
1629
1630 return err == 0;
1631}
1632EXPORT_SYMBOL(stedma40_filter);
1633
1634/* DMA ENGINE functions */
1635static int d40_alloc_chan_resources(struct dma_chan *chan)
1636{
1637 int err;
1638 unsigned long flags;
1639 struct d40_chan *d40c =
1640 container_of(chan, struct d40_chan, chan);
1641
1642 spin_lock_irqsave(&d40c->lock, flags);
1643
1644 d40c->completed = chan->cookie = 1;
1645
1646 /*
1647 * If no dma configuration is set (channel_type == 0)
1648 * use default configuration
1649 */
1650 if (d40c->dma_cfg.channel_type == 0) {
1651 err = d40_config_memcpy(d40c);
1652 if (err)
1653 goto err_alloc;
1654 }
1655
1656 err = d40_allocate_channel(d40c);
1657 if (err) {
1658 dev_err(&d40c->chan.dev->device,
1659 "[%s] Failed to allocate channel\n", __func__);
1660 goto err_alloc;
1661 }
1662
1663 err = d40_config_chan(d40c, &d40c->dma_cfg);
1664 if (err) {
1665 dev_err(&d40c->chan.dev->device,
1666 "[%s] Failed to configure channel\n",
1667 __func__);
1668 goto err_config;
1669 }
1670
1671 spin_unlock_irqrestore(&d40c->lock, flags);
1672 return 0;
1673
1674 err_config:
1675 (void) d40_free_dma(d40c);
1676 err_alloc:
1677 spin_unlock_irqrestore(&d40c->lock, flags);
1678 dev_err(&d40c->chan.dev->device,
1679 "[%s] Channel allocation failed\n", __func__);
1680 return -EINVAL;
1681}
1682
1683static void d40_free_chan_resources(struct dma_chan *chan)
1684{
1685 struct d40_chan *d40c =
1686 container_of(chan, struct d40_chan, chan);
1687 int err;
1688 unsigned long flags;
1689
1690 spin_lock_irqsave(&d40c->lock, flags);
1691
1692 err = d40_free_dma(d40c);
1693
1694 if (err)
1695 dev_err(&d40c->chan.dev->device,
1696 "[%s] Failed to free channel\n", __func__);
1697 spin_unlock_irqrestore(&d40c->lock, flags);
1698}
1699
1700static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1701 dma_addr_t dst,
1702 dma_addr_t src,
1703 size_t size,
1704 unsigned long flags)
1705{
1706 struct d40_desc *d40d;
1707 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1708 chan);
1709 unsigned long flg;
1710 int err = 0;
1711
1712 spin_lock_irqsave(&d40c->lock, flg);
1713 d40d = d40_desc_get(d40c);
1714
1715 if (d40d == NULL) {
1716 dev_err(&d40c->chan.dev->device,
1717 "[%s] Descriptor is NULL\n", __func__);
1718 goto err;
1719 }
1720
1721 memset(d40d, 0, sizeof(struct d40_desc));
1722
1723 d40d->txd.flags = flags;
1724
1725 dma_async_tx_descriptor_init(&d40d->txd, chan);
1726
1727 d40d->txd.tx_submit = d40_tx_submit;
1728
1729 if (d40c->log_num != D40_PHY_CHAN) {
1730
1731 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1732 dev_err(&d40c->chan.dev->device,
1733 "[%s] Out of memory\n", __func__);
1734 goto err;
1735 }
1736 d40d->lli_len = 1;
1737
1738 d40_log_fill_lli(d40d->lli_log.src,
1739 src,
1740 size,
1741 0,
1742 d40c->log_def.lcsp1,
1743 d40c->dma_cfg.src_info.data_width,
1744 true, true);
1745
1746 d40_log_fill_lli(d40d->lli_log.dst,
1747 dst,
1748 size,
1749 0,
1750 d40c->log_def.lcsp3,
1751 d40c->dma_cfg.dst_info.data_width,
1752 true, true);
1753
1754 } else {
1755
1756 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1757 dev_err(&d40c->chan.dev->device,
1758 "[%s] Out of memory\n", __func__);
1759 goto err;
1760 }
1761
1762 err = d40_phy_fill_lli(d40d->lli_phy.src,
1763 src,
1764 size,
1765 d40c->dma_cfg.src_info.psize,
1766 0,
1767 d40c->src_def_cfg,
1768 true,
1769 d40c->dma_cfg.src_info.data_width,
1770 false);
1771 if (err)
1772 goto err_fill_lli;
1773
1774 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1775 dst,
1776 size,
1777 d40c->dma_cfg.dst_info.psize,
1778 0,
1779 d40c->dst_def_cfg,
1780 true,
1781 d40c->dma_cfg.dst_info.data_width,
1782 false);
1783
1784 if (err)
1785 goto err_fill_lli;
1786
1787 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1788 d40d->lli_pool.size, DMA_TO_DEVICE);
1789 }
1790
1791 spin_unlock_irqrestore(&d40c->lock, flg);
1792 return &d40d->txd;
1793
1794err_fill_lli:
1795 dev_err(&d40c->chan.dev->device,
1796 "[%s] Failed filling in PHY LLI\n", __func__);
1797 d40_pool_lli_free(d40d);
1798err:
1799 spin_unlock_irqrestore(&d40c->lock, flg);
1800 return NULL;
1801}
1802
1803static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1804 struct d40_chan *d40c,
1805 struct scatterlist *sgl,
1806 unsigned int sg_len,
1807 enum dma_data_direction direction,
1808 unsigned long flags)
1809{
1810 dma_addr_t dev_addr = 0;
1811 int total_size;
1812 int lli_max = d40c->base->plat_data->llis_per_log;
1813
1814 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1815 dev_err(&d40c->chan.dev->device,
1816 "[%s] Out of memory\n", __func__);
1817 return -ENOMEM;
1818 }
1819
1820 d40d->lli_len = sg_len;
1821 d40d->lli_tcount = 0;
1822
1823 if (sg_len > 1)
1824 /*
1825 * Check if there is space available in lcla.
1826		 * If not, split the list into one-element transfers
1827		 * and run only in lcpa space.
1828 */
1829 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1830 lli_max = 1;
1831
1832 if (direction == DMA_FROM_DEVICE) {
1833 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1834 total_size = d40_log_sg_to_dev(&d40c->lcla,
1835 sgl, sg_len,
1836 &d40d->lli_log,
1837 &d40c->log_def,
1838 d40c->dma_cfg.src_info.data_width,
1839 d40c->dma_cfg.dst_info.data_width,
1840 direction,
1841 flags & DMA_PREP_INTERRUPT,
1842 dev_addr, lli_max,
1843 d40c->base->plat_data->llis_per_log);
1844 } else if (direction == DMA_TO_DEVICE) {
1845 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1846 total_size = d40_log_sg_to_dev(&d40c->lcla,
1847 sgl, sg_len,
1848 &d40d->lli_log,
1849 &d40c->log_def,
1850 d40c->dma_cfg.src_info.data_width,
1851 d40c->dma_cfg.dst_info.data_width,
1852 direction,
1853 flags & DMA_PREP_INTERRUPT,
1854 dev_addr, lli_max,
1855 d40c->base->plat_data->llis_per_log);
1856 } else
1857 return -EINVAL;
1858 if (total_size < 0)
1859 return -EINVAL;
1860
1861 return 0;
1862}
1863
1864static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1865 struct d40_chan *d40c,
1866 struct scatterlist *sgl,
1867 unsigned int sgl_len,
1868 enum dma_data_direction direction,
1869 unsigned long flags)
1870{
1871 dma_addr_t src_dev_addr;
1872 dma_addr_t dst_dev_addr;
1873 int res;
1874
1875 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1876 dev_err(&d40c->chan.dev->device,
1877 "[%s] Out of memory\n", __func__);
1878 return -ENOMEM;
1879 }
1880
1881 d40d->lli_len = sgl_len;
1882 d40d->lli_tcount = 0;
1883
1884 if (direction == DMA_FROM_DEVICE) {
1885 dst_dev_addr = 0;
1886 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1887 } else if (direction == DMA_TO_DEVICE) {
1888 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1889 src_dev_addr = 0;
1890 } else
1891 return -EINVAL;
1892
1893 res = d40_phy_sg_to_lli(sgl,
1894 sgl_len,
1895 src_dev_addr,
1896 d40d->lli_phy.src,
1897 d40d->lli_phy.src_addr,
1898 d40c->src_def_cfg,
1899 d40c->dma_cfg.src_info.data_width,
1900 d40c->dma_cfg.src_info.psize,
1901 true);
1902 if (res < 0)
1903 return res;
1904
1905 res = d40_phy_sg_to_lli(sgl,
1906 sgl_len,
1907 dst_dev_addr,
1908 d40d->lli_phy.dst,
1909 d40d->lli_phy.dst_addr,
1910 d40c->dst_def_cfg,
1911 d40c->dma_cfg.dst_info.data_width,
1912 d40c->dma_cfg.dst_info.psize,
1913 true);
1914 if (res < 0)
1915 return res;
1916
1917 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1918 d40d->lli_pool.size, DMA_TO_DEVICE);
1919 return 0;
1920}
1921
1922static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1923 struct scatterlist *sgl,
1924 unsigned int sg_len,
1925 enum dma_data_direction direction,
1926 unsigned long flags)
1927{
1928 struct d40_desc *d40d;
1929 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1930 chan);
1931 unsigned long flg;
1932 int err;
1933
1934 if (d40c->dma_cfg.pre_transfer)
1935 d40c->dma_cfg.pre_transfer(chan,
1936 d40c->dma_cfg.pre_transfer_data,
1937 sg_dma_len(sgl));
1938
1939 spin_lock_irqsave(&d40c->lock, flg);
1940 d40d = d40_desc_get(d40c);
1941 spin_unlock_irqrestore(&d40c->lock, flg);
1942
1943 if (d40d == NULL)
1944 return NULL;
1945
1946 memset(d40d, 0, sizeof(struct d40_desc));
1947
1948 if (d40c->log_num != D40_PHY_CHAN)
1949 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
1950 direction, flags);
1951 else
1952 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
1953 direction, flags);
1954 if (err) {
1955 dev_err(&d40c->chan.dev->device,
1956 "[%s] Failed to prepare %s slave sg job: %d\n",
1957 __func__,
1958 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1959 return NULL;
1960 }
1961
1962 d40d->txd.flags = flags;
1963
1964 dma_async_tx_descriptor_init(&d40d->txd, chan);
1965
1966 d40d->txd.tx_submit = d40_tx_submit;
1967
1968 return &d40d->txd;
1969}
1970
1971static enum dma_status d40_tx_status(struct dma_chan *chan,
1972 dma_cookie_t cookie,
1973 struct dma_tx_state *txstate)
1974{
1975 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1976 dma_cookie_t last_used;
1977 dma_cookie_t last_complete;
1978 int ret;
1979
1980 last_complete = d40c->completed;
1981 last_used = chan->cookie;
1982
1983 ret = dma_async_is_complete(cookie, last_complete, last_used);
1984
1985 if (txstate) {
1986 txstate->last = last_complete;
1987 txstate->used = last_used;
1988 txstate->residue = stedma40_residue(chan);
1989 }
1990
1991 return ret;
1992}
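
Clients never call d40_tx_status() directly; the dmaengine core routes the generic completion helpers to it. A hypothetical polling sketch, assuming chan and cookie come from an earlier prep/submit and using DMA_SUCCESS as the completion status of this kernel generation:

/* Hypothetical client-side completion check; not part of this patch. */
static bool my_dma40_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	dma_cookie_t last, used;

	/* Ends up in d40_tx_status() via device_tx_status */
	return dma_async_is_tx_complete(chan, cookie, &last, &used) ==
	       DMA_SUCCESS;
}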
1993
1994static void d40_issue_pending(struct dma_chan *chan)
1995{
1996 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1997 unsigned long flags;
1998
1999 spin_lock_irqsave(&d40c->lock, flags);
2000
2001 /* Busy means that pending jobs are already being processed */
2002 if (!d40c->busy)
2003 (void) d40_queue_start(d40c);
2004
2005 spin_unlock_irqrestore(&d40c->lock, flags);
2006}
2007
2008static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
2009{
2010 unsigned long flags;
2011 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2012
2013 switch (cmd) {
2014 case DMA_TERMINATE_ALL:
2015 spin_lock_irqsave(&d40c->lock, flags);
2016 d40_term_all(d40c);
2017 spin_unlock_irqrestore(&d40c->lock, flags);
2018 return 0;
2019 case DMA_PAUSE:
2020 return d40_pause(chan);
2021 case DMA_RESUME:
2022 return d40_resume(chan);
2023 }
2024
2025 /* Other commands are unimplemented */
2026 return -ENXIO;
2027}
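
With device_control wired up as above, a client pauses, resumes or terminates a channel through the generic hook rather than through any driver-private call. A hypothetical sketch, assuming chan was obtained from dma_request_channel():

/* Hypothetical client helper; error handling omitted for brevity. */
static void my_dma40_stop(struct dma_chan *chan)
{
	/* Pause first, then flush all queued descriptors */
	chan->device->device_control(chan, DMA_PAUSE);
	chan->device->device_control(chan, DMA_TERMINATE_ALL);
}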
2028
2029/* Initialization functions */
2030
2031static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2032 struct d40_chan *chans, int offset,
2033 int num_chans)
2034{
2035 int i = 0;
2036 struct d40_chan *d40c;
2037
2038 INIT_LIST_HEAD(&dma->channels);
2039
2040 for (i = offset; i < offset + num_chans; i++) {
2041 d40c = &chans[i];
2042 d40c->base = base;
2043 d40c->chan.device = dma;
2044
2045 /* Invalidate lcla element */
2046 d40c->lcla.src_id = -1;
2047 d40c->lcla.dst_id = -1;
2048
2049 spin_lock_init(&d40c->lock);
2050
2051 d40c->log_num = D40_PHY_CHAN;
2052
2053 INIT_LIST_HEAD(&d40c->free);
2054 INIT_LIST_HEAD(&d40c->active);
2055 INIT_LIST_HEAD(&d40c->queue);
2056 INIT_LIST_HEAD(&d40c->client);
2057
2058 d40c->free_len = 0;
2059
2060 tasklet_init(&d40c->tasklet, dma_tasklet,
2061 (unsigned long) d40c);
2062
2063 list_add_tail(&d40c->chan.device_node,
2064 &dma->channels);
2065 }
2066}
2067
2068static int __init d40_dmaengine_init(struct d40_base *base,
2069 int num_reserved_chans)
2070{
2071	int err;
2072
2073 d40_chan_init(base, &base->dma_slave, base->log_chans,
2074 0, base->num_log_chans);
2075
2076 dma_cap_zero(base->dma_slave.cap_mask);
2077 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2078
2079 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2080 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2081 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2082 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2083 base->dma_slave.device_tx_status = d40_tx_status;
2084 base->dma_slave.device_issue_pending = d40_issue_pending;
2085 base->dma_slave.device_control = d40_control;
2086 base->dma_slave.dev = base->dev;
2087
2088 err = dma_async_device_register(&base->dma_slave);
2089
2090 if (err) {
2091 dev_err(base->dev,
2092 "[%s] Failed to register slave channels\n",
2093 __func__);
2094 goto failure1;
2095 }
2096
2097 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2098 base->num_log_chans, base->plat_data->memcpy_len);
2099
2100 dma_cap_zero(base->dma_memcpy.cap_mask);
2101 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2102
2103 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2104 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2105 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2106 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2107 base->dma_memcpy.device_tx_status = d40_tx_status;
2108 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2109 base->dma_memcpy.device_control = d40_control;
2110 base->dma_memcpy.dev = base->dev;
2111 /*
2112	 * This controller can only access addresses at even
2113	 * 32-bit boundaries, i.e. 2^2 alignment.
2114 */
2115 base->dma_memcpy.copy_align = 2;
2116
2117 err = dma_async_device_register(&base->dma_memcpy);
2118
2119 if (err) {
2120 dev_err(base->dev,
2121			"[%s] Failed to register memcpy only channels\n",
2122 __func__);
2123 goto failure2;
2124 }
2125
2126 d40_chan_init(base, &base->dma_both, base->phy_chans,
2127 0, num_reserved_chans);
2128
2129 dma_cap_zero(base->dma_both.cap_mask);
2130 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2131 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2132
2133 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2134 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2135 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2136 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2137 base->dma_both.device_tx_status = d40_tx_status;
2138 base->dma_both.device_issue_pending = d40_issue_pending;
2139 base->dma_both.device_control = d40_control;
2140 base->dma_both.dev = base->dev;
2141 base->dma_both.copy_align = 2;
2142 err = dma_async_device_register(&base->dma_both);
2143
2144 if (err) {
2145 dev_err(base->dev,
2146 "[%s] Failed to register logical and physical capable channels\n",
2147 __func__);
2148 goto failure3;
2149 }
2150 return 0;
2151failure3:
2152 dma_async_device_unregister(&base->dma_memcpy);
2153failure2:
2154 dma_async_device_unregister(&base->dma_slave);
2155failure1:
2156 return err;
2157}
2158
2159/* Initialization functions. */
2160
2161static int __init d40_phy_res_init(struct d40_base *base)
2162{
2163 int i;
2164 int num_phy_chans_avail = 0;
2165 u32 val[2];
2166 int odd_even_bit = -2;
2167
2168 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2169 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2170
2171 for (i = 0; i < base->num_phy_chans; i++) {
2172 base->phy_res[i].num = i;
2173 odd_even_bit += 2 * ((i % 2) == 0);
2174 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2175 /* Mark security only channels as occupied */
2176 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2177 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2178 } else {
2179 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2180 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2181 num_phy_chans_avail++;
2182 }
2183 spin_lock_init(&base->phy_res[i].lock);
2184 }
2185 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2186 num_phy_chans_avail, base->num_phy_chans);
2187
2188 /* Verify settings extended vs standard */
2189 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2190
2191 for (i = 0; i < base->num_phy_chans; i++) {
2192
2193 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2194 (val[0] & 0x3) != 1)
2195 dev_info(base->dev,
2196 "[%s] INFO: channel %d is misconfigured (%d)\n",
2197 __func__, i, val[0] & 0x3);
2198
2199 val[0] = val[0] >> 2;
2200 }
2201
2202 return num_phy_chans_avail;
2203}
2204
2205static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2206{
2207 static const struct d40_reg_val dma_id_regs[] = {
2208 /* Peripheral Id */
2209 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2210 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2211 /*
2212 * D40_DREG_PERIPHID2 Depends on HW revision:
2213 * MOP500/HREF ED has 0x0008,
2214 * ? has 0x0018,
2215 * HREF V1 has 0x0028
2216 */
2217 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2218
2219 /* PCell Id */
2220 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2221 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2222 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2223 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2224 };
2225 struct stedma40_platform_data *plat_data;
2226 struct clk *clk = NULL;
2227 void __iomem *virtbase = NULL;
2228 struct resource *res = NULL;
2229 struct d40_base *base = NULL;
2230 int num_log_chans = 0;
2231 int num_phy_chans;
2232 int i;
2233
2234 clk = clk_get(&pdev->dev, NULL);
2235
2236 if (IS_ERR(clk)) {
2237 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2238 __func__);
2239 goto failure;
2240 }
2241
2242 clk_enable(clk);
2243
2244 /* Get IO for DMAC base address */
2245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2246 if (!res)
2247 goto failure;
2248
2249 if (request_mem_region(res->start, resource_size(res),
2250 D40_NAME " I/O base") == NULL)
2251 goto failure;
2252
2253 virtbase = ioremap(res->start, resource_size(res));
2254 if (!virtbase)
2255 goto failure;
2256
2257 /* HW version check */
2258 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2259 if (dma_id_regs[i].val !=
2260 readl(virtbase + dma_id_regs[i].reg)) {
2261 dev_err(&pdev->dev,
2262 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2263 __func__,
2264 dma_id_regs[i].val,
2265 dma_id_regs[i].reg,
2266 readl(virtbase + dma_id_regs[i].reg));
2267 goto failure;
2268 }
2269 }
2270
2271 i = readl(virtbase + D40_DREG_PERIPHID2);
2272
2273 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2274 dev_err(&pdev->dev,
2275 "[%s] Unknown designer! Got %x wanted %x\n",
2276 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2277 goto failure;
2278 }
2279
2280 /* The number of physical channels on this HW */
2281 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2282
2283 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2284 (i >> 4) & 0xf, res->start);
2285
2286 plat_data = pdev->dev.platform_data;
2287
2288 /* Count the number of logical channels in use */
2289 for (i = 0; i < plat_data->dev_len; i++)
2290 if (plat_data->dev_rx[i] != 0)
2291 num_log_chans++;
2292
2293 for (i = 0; i < plat_data->dev_len; i++)
2294 if (plat_data->dev_tx[i] != 0)
2295 num_log_chans++;
2296
2297 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2298 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2299 sizeof(struct d40_chan), GFP_KERNEL);
2300
2301 if (base == NULL) {
2302 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2303 goto failure;
2304 }
2305
2306 base->clk = clk;
2307 base->num_phy_chans = num_phy_chans;
2308 base->num_log_chans = num_log_chans;
2309 base->phy_start = res->start;
2310 base->phy_size = resource_size(res);
2311 base->virtbase = virtbase;
2312 base->plat_data = plat_data;
2313 base->dev = &pdev->dev;
2314 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2315 base->log_chans = &base->phy_chans[num_phy_chans];
2316
2317 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2318 GFP_KERNEL);
2319 if (!base->phy_res)
2320 goto failure;
2321
2322 base->lookup_phy_chans = kzalloc(num_phy_chans *
2323 sizeof(struct d40_chan *),
2324 GFP_KERNEL);
2325 if (!base->lookup_phy_chans)
2326 goto failure;
2327
2328 if (num_log_chans + plat_data->memcpy_len) {
2329 /*
2330		 * The max number of logical channels equals the number of
2331		 * event lines for all src and dst devices.
2332 */
2333 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2334 sizeof(struct d40_chan *),
2335 GFP_KERNEL);
2336 if (!base->lookup_log_chans)
2337 goto failure;
2338 }
2339 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2340 GFP_KERNEL);
2341 if (!base->lcla_pool.alloc_map)
2342 goto failure;
2343
2344 return base;
2345
2346failure:
2347 if (clk) {
2348 clk_disable(clk);
2349 clk_put(clk);
2350 }
2351 if (virtbase)
2352 iounmap(virtbase);
2353 if (res)
2354 release_mem_region(res->start,
2355 resource_size(res));
2358
2359 if (base) {
2360 kfree(base->lcla_pool.alloc_map);
2361 kfree(base->lookup_log_chans);
2362 kfree(base->lookup_phy_chans);
2363 kfree(base->phy_res);
2364 kfree(base);
2365 }
2366
2367 return NULL;
2368}
2369
2370static void __init d40_hw_init(struct d40_base *base)
2371{
2372
2373 static const struct d40_reg_val dma_init_reg[] = {
2374 /* Clock every part of the DMA block from start */
2375 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2376
2377 /* Interrupts on all logical channels */
2378 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2379 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2380 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2381 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2382 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2383 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2384 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2385 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2386 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2387 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2388 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2389 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2390 };
2391 int i;
2392 u32 prmseo[2] = {0, 0};
2393 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2394 u32 pcmis = 0;
2395 u32 pcicr = 0;
2396
2397 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2398 writel(dma_init_reg[i].val,
2399 base->virtbase + dma_init_reg[i].reg);
2400
2401 /* Configure all our dma channels to default settings */
2402 for (i = 0; i < base->num_phy_chans; i++) {
2403
2404 activeo[i % 2] = activeo[i % 2] << 2;
2405
2406 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2407 == D40_ALLOC_PHY) {
2408 activeo[i % 2] |= 3;
2409 continue;
2410 }
2411
2412 /* Enable interrupt # */
2413 pcmis = (pcmis << 1) | 1;
2414
2415 /* Clear interrupt # */
2416 pcicr = (pcicr << 1) | 1;
2417
2418 /* Set channel to physical mode */
2419 prmseo[i % 2] = prmseo[i % 2] << 2;
2420 prmseo[i % 2] |= 1;
2421
2422 }
2423
2424 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2425 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2426 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2427 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2428
2429 /* Write which interrupt to enable */
2430 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2431
2432 /* Write which interrupt to clear */
2433 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2434
2435}
2436
2437static int __init d40_probe(struct platform_device *pdev)
2438{
2439 int err;
2440 int ret = -ENOENT;
2441 struct d40_base *base;
2442 struct resource *res = NULL;
2443 int num_reserved_chans;
2444 u32 val;
2445
2446 base = d40_hw_detect_init(pdev);
2447
2448 if (!base)
2449 goto failure;
2450
2451 num_reserved_chans = d40_phy_res_init(base);
2452
2453 platform_set_drvdata(pdev, base);
2454
2455 spin_lock_init(&base->interrupt_lock);
2456 spin_lock_init(&base->execmd_lock);
2457
2458 /* Get IO for logical channel parameter address */
2459 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2460 if (!res) {
2461 ret = -ENOENT;
2462 dev_err(&pdev->dev,
2463 "[%s] No \"lcpa\" memory resource\n",
2464 __func__);
2465 goto failure;
2466 }
2467 base->lcpa_size = resource_size(res);
2468 base->phy_lcpa = res->start;
2469
2470 if (request_mem_region(res->start, resource_size(res),
2471 D40_NAME " I/O lcpa") == NULL) {
2472 ret = -EBUSY;
2473 dev_err(&pdev->dev,
2474 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2475 __func__, res->start, res->end);
2476 goto failure;
2477 }
2478
2479 /* We make use of ESRAM memory for this. */
2480 val = readl(base->virtbase + D40_DREG_LCPA);
2481 if (res->start != val && val != 0) {
2482 dev_warn(&pdev->dev,
2483 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2484 __func__, val, res->start);
2485 } else
2486 writel(res->start, base->virtbase + D40_DREG_LCPA);
2487
2488 base->lcpa_base = ioremap(res->start, resource_size(res));
2489 if (!base->lcpa_base) {
2490 ret = -ENOMEM;
2491 dev_err(&pdev->dev,
2492 "[%s] Failed to ioremap LCPA region\n",
2493 __func__);
2494 goto failure;
2495 }
2496 /* Get IO for logical channel link address */
2497 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2498 if (!res) {
2499 ret = -ENOENT;
2500 dev_err(&pdev->dev,
2501 "[%s] No \"lcla\" resource defined\n",
2502 __func__);
2503 goto failure;
2504 }
2505
2506 base->lcla_pool.base_size = resource_size(res);
2507 base->lcla_pool.phy = res->start;
2508
2509 if (request_mem_region(res->start, resource_size(res),
2510 D40_NAME " I/O lcla") == NULL) {
2511 ret = -EBUSY;
2512 dev_err(&pdev->dev,
2513 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2514 __func__, res->start, res->end);
2515 goto failure;
2516 }
2517 val = readl(base->virtbase + D40_DREG_LCLA);
2518 if (res->start != val && val != 0) {
2519 dev_warn(&pdev->dev,
2520 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2521 __func__, val, res->start);
2522 } else
2523 writel(res->start, base->virtbase + D40_DREG_LCLA);
2524
2525 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2526 if (!base->lcla_pool.base) {
2527 ret = -ENOMEM;
2528 dev_err(&pdev->dev,
2529 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2530 __func__, res->start, res->end);
2531 goto failure;
2532 }
2533
2534 spin_lock_init(&base->lcla_pool.lock);
2535
2536 base->lcla_pool.num_blocks = base->num_phy_chans;
2537
2538 base->irq = platform_get_irq(pdev, 0);
2539
2540 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2541
2542 if (ret) {
2543 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2544 goto failure;
2545 }
2546
2547 err = d40_dmaengine_init(base, num_reserved_chans);
2548 if (err)
2549 goto failure;
2550
2551 d40_hw_init(base);
2552
2553 dev_info(base->dev, "initialized\n");
2554 return 0;
2555
2556failure:
2557 if (base) {
2558 if (base->virtbase)
2559 iounmap(base->virtbase);
2560 if (base->lcla_pool.phy)
2561 release_mem_region(base->lcla_pool.phy,
2562 base->lcla_pool.base_size);
2563 if (base->phy_lcpa)
2564 release_mem_region(base->phy_lcpa,
2565 base->lcpa_size);
2566 if (base->phy_start)
2567 release_mem_region(base->phy_start,
2568 base->phy_size);
2569 if (base->clk) {
2570 clk_disable(base->clk);
2571 clk_put(base->clk);
2572 }
2573
2574 kfree(base->lcla_pool.alloc_map);
2575 kfree(base->lookup_log_chans);
2576 kfree(base->lookup_phy_chans);
2577 kfree(base->phy_res);
2578 kfree(base);
2579 }
2580
2581 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2582 return ret;
2583}
2584
2585static struct platform_driver d40_driver = {
2586 .driver = {
2587 .owner = THIS_MODULE,
2588 .name = D40_NAME,
2589 },
2590};
2591
2592int __init stedma40_init(void)
2593{
2594 return platform_driver_probe(&d40_driver, d40_probe);
2595}
2596arch_initcall(stedma40_init);
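
Since the probe above binds by platform device name and pulls its "base", "lcpa" and "lcla" memory regions, its IRQ and its stedma40_platform_data from the platform device, board code has to provide all of these. The sketch below is hypothetical: the field names mirror how the driver dereferences plat_data, but the struct layout, the device name string and every address and value are placeholders, not taken from this patch.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <plat/ste_dma40.h>

/* Hypothetical board support code; all values are placeholders. */
static dma_addr_t dma40_rx_map[16];	/* indexed by src_dev_type */
static dma_addr_t dma40_tx_map[16];	/* indexed by dst_dev_type */

static struct stedma40_platform_data dma40_plat_data = {
	.dev_len       = ARRAY_SIZE(dma40_rx_map),
	.dev_rx        = dma40_rx_map,
	.dev_tx        = dma40_tx_map,
	.memcpy_len    = 2,	/* placeholder: channels reserved for memcpy */
	.llis_per_log  = 8,	/* placeholder */
};

static struct resource dma40_resources[] = {
	{ .name = "base", .flags = IORESOURCE_MEM,
	  .start = 0x801C0000, .end = 0x801C0FFF },	/* placeholder */
	{ .name = "lcpa", .flags = IORESOURCE_MEM,
	  .start = 0x40010000, .end = 0x400107FF },	/* placeholder */
	{ .name = "lcla", .flags = IORESOURCE_MEM,
	  .start = 0x40010800, .end = 0x40010FFF },	/* placeholder */
	{ .flags = IORESOURCE_IRQ, .start = 25, .end = 25 },	/* placeholder */
};

static struct platform_device dma40_device = {
	.name           = "dma40",	/* assumed to match the driver's D40_NAME */
	.id             = 0,
	.dev            = { .platform_data = &dma40_plat_data },
	.resource       = dma40_resources,
	.num_resources  = ARRAY_SIZE(dma40_resources),
};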
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
new file mode 100644
index 000000000000..561fdd8a80c1
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.c
@@ -0,0 +1,454 @@
1/*
 2 * drivers/dma/ste_dma40_ll.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */
9
10#include <linux/kernel.h>
11#include <plat/ste_dma40.h>
12
13#include "ste_dma40_ll.h"
14
15/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
16void d40_log_cfg(struct stedma40_chan_cfg *cfg,
17 u32 *lcsp1, u32 *lcsp3)
18{
19 u32 l3 = 0; /* dst */
20 u32 l1 = 0; /* src */
21
22 /* src is mem? -> increase address pos */
23 if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
24 cfg->dir == STEDMA40_MEM_TO_MEM)
25 l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
26
27 /* dst is mem? -> increase address pos */
28 if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
29 cfg->dir == STEDMA40_MEM_TO_MEM)
30 l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
31
32 /* src is hw? -> master port 1 */
33 if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
34 cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
35 l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
36
37 /* dst is hw? -> master port 1 */
38 if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
39 cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
40 l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
41
42 l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
43 l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
44 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
45 l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
46 l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
47
48 l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
49 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
50 l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
51 l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
52
53 *lcsp1 = l1;
54 *lcsp3 = l3;
55
56}
57
58/* Sets up SRC and DST CFG register for both logical and physical channels */
59void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
60 u32 *src_cfg, u32 *dst_cfg, bool is_log)
61{
62 u32 src = 0;
63 u32 dst = 0;
64
65 if (!is_log) {
66 /* Physical channel */
67 if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
68 (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
69 /* Set master port to 1 */
70 src |= 1 << D40_SREG_CFG_MST_POS;
71 src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
72
73 if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
74 src |= 1 << D40_SREG_CFG_PHY_TM_POS;
75 else
76 src |= 3 << D40_SREG_CFG_PHY_TM_POS;
77 }
78 if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
79 (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
80 /* Set master port to 1 */
81 dst |= 1 << D40_SREG_CFG_MST_POS;
82 dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
83
84 if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
85 dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
86 else
87 dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
88 }
89 /* Interrupt on end of transfer for destination */
90 dst |= 1 << D40_SREG_CFG_TIM_POS;
91
92 /* Generate interrupt on error */
93 src |= 1 << D40_SREG_CFG_EIM_POS;
94 dst |= 1 << D40_SREG_CFG_EIM_POS;
95
96 /* PSIZE */
97 if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
98 src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
99 src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
100 }
101 if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
102 dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
103 dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
104 }
105
106 /* Element size */
107 src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
108 dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
109
110 } else {
111 /* Logical channel */
112 dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
113 src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
114 }
115
116 if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
117 src |= 1 << D40_SREG_CFG_PRI_POS;
118 dst |= 1 << D40_SREG_CFG_PRI_POS;
119 }
120
121 src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
122 dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
123
124 *src_cfg = src;
125 *dst_cfg = dst;
126}
127
128int d40_phy_fill_lli(struct d40_phy_lli *lli,
129 dma_addr_t data,
130 u32 data_size,
131 int psize,
132 dma_addr_t next_lli,
133 u32 reg_cfg,
134 bool term_int,
135 u32 data_width,
136 bool is_device)
137{
138 int num_elems;
139
140 if (psize == STEDMA40_PSIZE_PHY_1)
141 num_elems = 1;
142 else
143 num_elems = 2 << psize;
144
145 /*
146	 * The size field is 16 bit. data_width is 8, 16, 32 or 64 bit.
147	 * Blocks larger than 64 KiB must be split.
148 */
149 if (data_size > (0xffff << data_width))
150 return -EINVAL;
151
152 /* Must be aligned */
153 if (!IS_ALIGNED(data, 0x1 << data_width))
154 return -EINVAL;
155
156	/* Transfer size can't be smaller than (num_elems * elem_size) */
157 if (data_size < num_elems * (0x1 << data_width))
158 return -EINVAL;
159
160	/* The number of elements, i.e. how many chunks */
161 lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
162
163 /*
164 * Distance to next element sized entry.
165 * Usually the size of the element unless you want gaps.
166 */
167 if (!is_device)
168 lli->reg_elt |= (0x1 << data_width) <<
169 D40_SREG_ELEM_PHY_EIDX_POS;
170
171 /* Where the data is */
172 lli->reg_ptr = data;
173 lli->reg_cfg = reg_cfg;
174
175 /* If this scatter list entry is the last one, no next link */
176 if (next_lli == 0)
177 lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
178 else
179 lli->reg_lnk = next_lli;
180
181 /* Set/clear interrupt generation on this link item.*/
182 if (term_int)
183 lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
184 else
185 lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
186
187 /* Post link */
188 lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
189
190 return 0;
191}
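
The element counter written above is 16 bits wide, so the per-LLI byte limit scales with the element size. A hypothetical helper making the arithmetic of the check in d40_phy_fill_lli() explicit, assuming data_width is the log2 of the element size in bytes (0 = 8 bit ... 3 = 64 bit) as the shifts above imply:

/* Hypothetical helper; not part of this patch. */
static inline u32 d40_max_lli_bytes(u32 data_width)
{
	/* At most 0xffff elements of (1 << data_width) bytes each */
	return 0xffff << data_width;	/* 65535 B at 8 bit, 262140 B at 32 bit */
}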
192
193int d40_phy_sg_to_lli(struct scatterlist *sg,
194 int sg_len,
195 dma_addr_t target,
196 struct d40_phy_lli *lli,
197 dma_addr_t lli_phys,
198 u32 reg_cfg,
199 u32 data_width,
200 int psize,
201 bool term_int)
202{
203 int total_size = 0;
204 int i;
205 struct scatterlist *current_sg = sg;
206 dma_addr_t next_lli_phys;
207 dma_addr_t dst;
208 int err = 0;
209
210 for_each_sg(sg, current_sg, sg_len, i) {
211
212 total_size += sg_dma_len(current_sg);
213
214 /* If this scatter list entry is the last one, no next link */
215 if (sg_len - 1 == i)
216 next_lli_phys = 0;
217 else
218 next_lli_phys = ALIGN(lli_phys + (i + 1) *
219 sizeof(struct d40_phy_lli),
220 D40_LLI_ALIGN);
221
222 if (target)
223 dst = target;
224 else
225 dst = sg_phys(current_sg);
226
227 err = d40_phy_fill_lli(&lli[i],
228 dst,
229 sg_dma_len(current_sg),
230 psize,
231 next_lli_phys,
232 reg_cfg,
233 !next_lli_phys,
234 data_width,
235 target == dst);
236 if (err)
237 goto err;
238 }
239
240 return total_size;
241 err:
242 return err;
243}
244
245
246void d40_phy_lli_write(void __iomem *virtbase,
247 u32 phy_chan_num,
248 struct d40_phy_lli *lli_dst,
249 struct d40_phy_lli *lli_src)
250{
251
252 writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
253 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
254 writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
255 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
256 writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
257 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
258 writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
259 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
260
261 writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
262 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
263 writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
264 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
265 writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
266 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
267 writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
268 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
269
270}
271
272/* DMA logical lli operations */
273
274void d40_log_fill_lli(struct d40_log_lli *lli,
275 dma_addr_t data, u32 data_size,
276 u32 lli_next_off, u32 reg_cfg,
277 u32 data_width,
278 bool term_int, bool addr_inc)
279{
280 lli->lcsp13 = reg_cfg;
281
282 /* The number of elements to transfer */
283 lli->lcsp02 = ((data_size >> data_width) <<
284 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
285	/* The 16 LSBs of the current element address */
286 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
287	/* The 16 MSBs of the current element address */
288 lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;
289
290 if (addr_inc)
291 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
292
293 lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
294 /* If this scatter list entry is the last one, no next link */
295 lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
296 D40_MEM_LCSP1_SLOS_MASK;
297
298 if (term_int)
299 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
300 else
301 lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
302}
303
304int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
305 struct scatterlist *sg,
306 int sg_len,
307 struct d40_log_lli_bidir *lli,
308 struct d40_def_lcsp *lcsp,
309 u32 src_data_width,
310 u32 dst_data_width,
311 enum dma_data_direction direction,
312 bool term_int, dma_addr_t dev_addr, int max_len,
313 int llis_per_log)
314{
315 int total_size = 0;
316 struct scatterlist *current_sg = sg;
317 int i;
318 u32 next_lli_off_dst;
319 u32 next_lli_off_src;
320
321 next_lli_off_src = 0;
322 next_lli_off_dst = 0;
323
324 for_each_sg(sg, current_sg, sg_len, i) {
325 total_size += sg_dma_len(current_sg);
326
327 /*
328 * If this scatter list entry is the last one or
329		 * If this scatter list entry is the last one, or the
330		 * max length is reached, terminate the link.
331 if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
332 next_lli_off_src = 0;
333 next_lli_off_dst = 0;
334 } else {
335 if (next_lli_off_dst == 0 &&
336 next_lli_off_src == 0) {
337 /* The first lli will be at next_lli_off */
338 next_lli_off_dst = (lcla->dst_id *
339 llis_per_log + 1);
340 next_lli_off_src = (lcla->src_id *
341 llis_per_log + 1);
342 } else {
343 next_lli_off_dst++;
344 next_lli_off_src++;
345 }
346 }
347
348 if (direction == DMA_TO_DEVICE) {
349 d40_log_fill_lli(&lli->src[i],
350 sg_phys(current_sg),
351 sg_dma_len(current_sg),
352 next_lli_off_src,
353 lcsp->lcsp1, src_data_width,
354 term_int && !next_lli_off_src,
355 true);
356 d40_log_fill_lli(&lli->dst[i],
357 dev_addr,
358 sg_dma_len(current_sg),
359 next_lli_off_dst,
360 lcsp->lcsp3, dst_data_width,
361 /* No next == terminal interrupt */
362 term_int && !next_lli_off_dst,
363 false);
364 } else {
365 d40_log_fill_lli(&lli->dst[i],
366 sg_phys(current_sg),
367 sg_dma_len(current_sg),
368 next_lli_off_dst,
369 lcsp->lcsp3, dst_data_width,
370 /* No next == terminal interrupt */
371 term_int && !next_lli_off_dst,
372 true);
373 d40_log_fill_lli(&lli->src[i],
374 dev_addr,
375 sg_dma_len(current_sg),
376 next_lli_off_src,
377 lcsp->lcsp1, src_data_width,
378 term_int && !next_lli_off_src,
379 false);
380 }
381 }
382 return total_size;
383}
384
385int d40_log_sg_to_lli(int lcla_id,
386 struct scatterlist *sg,
387 int sg_len,
388 struct d40_log_lli *lli_sg,
389 u32 lcsp13, /* src or dst*/
390 u32 data_width,
391 bool term_int, int max_len, int llis_per_log)
392{
393 int total_size = 0;
394 struct scatterlist *current_sg = sg;
395 int i;
396 u32 next_lli_off = 0;
397
398 for_each_sg(sg, current_sg, sg_len, i) {
399 total_size += sg_dma_len(current_sg);
400
401 /*
402 * If this scatter list entry is the last one or
403 * max length, terminate link.
404 */
405 if (sg_len - 1 == i || ((i+1) % max_len == 0))
406 next_lli_off = 0;
407 else {
408 if (next_lli_off == 0)
409 /* The first lli will be at next_lli_off */
410 next_lli_off = lcla_id * llis_per_log + 1;
411 else
412 next_lli_off++;
413 }
414
415 d40_log_fill_lli(&lli_sg[i],
416 sg_phys(current_sg),
417 sg_dma_len(current_sg),
418 next_lli_off,
419 lcsp13, data_width,
420 term_int && !next_lli_off,
421 true);
422 }
423 return total_size;
424}
425
426void d40_log_lli_write(struct d40_log_lli_full *lcpa,
427 struct d40_log_lli *lcla_src,
428 struct d40_log_lli *lcla_dst,
429 struct d40_log_lli *lli_dst,
430 struct d40_log_lli *lli_src,
431 int llis_per_log)
432{
433 u32 slos = 0;
434 u32 dlos = 0;
435 int i;
436
437 lcpa->lcsp0 = lli_src->lcsp02;
438 lcpa->lcsp1 = lli_src->lcsp13;
439 lcpa->lcsp2 = lli_dst->lcsp02;
440 lcpa->lcsp3 = lli_dst->lcsp13;
441
442 slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
443 dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
444
445 for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
446 writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
447 writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
448 writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
449 writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
450
451 slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
452 dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
453 }
454}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
new file mode 100644
index 000000000000..2029280cb332
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.h
@@ -0,0 +1,354 @@
1/*
 2 * drivers/dma/ste_dma40_ll.h
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */
9#ifndef STE_DMA40_LL_H
10#define STE_DMA40_LL_H
11
12#define D40_DREG_PCBASE 0x400
13#define D40_DREG_PCDELTA (8 * 4)
14#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
15
16#define D40_TYPE_TO_GROUP(type) (type / 16)
17#define D40_TYPE_TO_EVENT(type) (type % 16)
18
19/* Most bits of the CFG register are the same in log as in phy mode */
20#define D40_SREG_CFG_MST_POS 15
21#define D40_SREG_CFG_TIM_POS 14
22#define D40_SREG_CFG_EIM_POS 13
23#define D40_SREG_CFG_LOG_INCR_POS 12
24#define D40_SREG_CFG_PHY_PEN_POS 12
25#define D40_SREG_CFG_PSIZE_POS 10
26#define D40_SREG_CFG_ESIZE_POS 8
27#define D40_SREG_CFG_PRI_POS 7
28#define D40_SREG_CFG_LBE_POS 6
29#define D40_SREG_CFG_LOG_GIM_POS 5
30#define D40_SREG_CFG_LOG_MFU_POS 4
31#define D40_SREG_CFG_PHY_TM_POS 4
32#define D40_SREG_CFG_PHY_EVTL_POS 0
33
34
35/* Standard channel parameters - basic mode (element register) */
36#define D40_SREG_ELEM_PHY_ECNT_POS 16
37#define D40_SREG_ELEM_PHY_EIDX_POS 0
38
39#define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS)
40
41/* Standard channel parameters - basic mode (Link register) */
42#define D40_SREG_LNK_PHY_TCP_POS 0
43#define D40_SREG_LNK_PHY_LMP_POS 1
44#define D40_SREG_LNK_PHY_PRE_POS 2
45/*
46 * Source/destination link address. Contains the
47 * 29-bit, word-aligned byte address of the reload area.
48 */
49#define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL
50
51/* Standard basic channel logical mode */
52
53/* Element register */
54#define D40_SREG_ELEM_LOG_ECNT_POS 16
55#define D40_SREG_ELEM_LOG_LIDX_POS 8
56#define D40_SREG_ELEM_LOG_LOS_POS 1
57#define D40_SREG_ELEM_LOG_TCP_POS 0
58
59#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
60
61/* Link register */
62#define D40_DEACTIVATE_EVENTLINE 0x0
63#define D40_ACTIVATE_EVENTLINE 0x1
64#define D40_EVENTLINE_POS(i) (2 * i)
65#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
66
67/* Standard basic channel logical params in memory */
68
69/* LCSP0 */
70#define D40_MEM_LCSP0_ECNT_POS 16
71#define D40_MEM_LCSP0_SPTR_POS 0
72
73#define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS)
74#define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS)
75
76/* LCSP1 */
77#define D40_MEM_LCSP1_SPTR_POS 16
78#define D40_MEM_LCSP1_SCFG_MST_POS 15
79#define D40_MEM_LCSP1_SCFG_TIM_POS 14
80#define D40_MEM_LCSP1_SCFG_EIM_POS 13
81#define D40_MEM_LCSP1_SCFG_INCR_POS 12
82#define D40_MEM_LCSP1_SCFG_PSIZE_POS 10
83#define D40_MEM_LCSP1_SCFG_ESIZE_POS 8
84#define D40_MEM_LCSP1_SLOS_POS 1
85#define D40_MEM_LCSP1_STCP_POS 0
86
87#define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS)
88#define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS)
89#define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS)
90#define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS)
91#define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS)
92#define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS)
93
94/* LCSP2 */
95#define D40_MEM_LCSP2_ECNT_POS 16
96
97#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
98
99/* LCSP3 */
100#define D40_MEM_LCSP3_DCFG_MST_POS 15
101#define D40_MEM_LCSP3_DCFG_TIM_POS 14
102#define D40_MEM_LCSP3_DCFG_EIM_POS 13
103#define D40_MEM_LCSP3_DCFG_INCR_POS 12
104#define D40_MEM_LCSP3_DCFG_PSIZE_POS 10
105#define D40_MEM_LCSP3_DCFG_ESIZE_POS 8
106#define D40_MEM_LCSP3_DLOS_POS 1
107#define D40_MEM_LCSP3_DTCP_POS 0
108
109#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
110#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
111
112
113/* Standard channel parameter register offsets */
114#define D40_CHAN_REG_SSCFG 0x00
115#define D40_CHAN_REG_SSELT 0x04
116#define D40_CHAN_REG_SSPTR 0x08
117#define D40_CHAN_REG_SSLNK 0x0C
118#define D40_CHAN_REG_SDCFG 0x10
119#define D40_CHAN_REG_SDELT 0x14
120#define D40_CHAN_REG_SDPTR 0x18
121#define D40_CHAN_REG_SDLNK 0x1C
122
123/* DMA Register Offsets */
124#define D40_DREG_GCC 0x000
125#define D40_DREG_PRTYP 0x004
126#define D40_DREG_PRSME 0x008
127#define D40_DREG_PRSMO 0x00C
128#define D40_DREG_PRMSE 0x010
129#define D40_DREG_PRMSO 0x014
130#define D40_DREG_PRMOE 0x018
131#define D40_DREG_PRMOO 0x01C
132#define D40_DREG_LCPA 0x020
133#define D40_DREG_LCLA 0x024
134#define D40_DREG_ACTIVE 0x050
135#define D40_DREG_ACTIVO 0x054
136#define D40_DREG_FSEB1 0x058
137#define D40_DREG_FSEB2 0x05C
138#define D40_DREG_PCMIS 0x060
139#define D40_DREG_PCICR 0x064
140#define D40_DREG_PCTIS 0x068
141#define D40_DREG_PCEIS 0x06C
142#define D40_DREG_LCMIS0 0x080
143#define D40_DREG_LCMIS1 0x084
144#define D40_DREG_LCMIS2 0x088
145#define D40_DREG_LCMIS3 0x08C
146#define D40_DREG_LCICR0 0x090
147#define D40_DREG_LCICR1 0x094
148#define D40_DREG_LCICR2 0x098
149#define D40_DREG_LCICR3 0x09C
150#define D40_DREG_LCTIS0 0x0A0
151#define D40_DREG_LCTIS1 0x0A4
152#define D40_DREG_LCTIS2 0x0A8
153#define D40_DREG_LCTIS3 0x0AC
154#define D40_DREG_LCEIS0 0x0B0
155#define D40_DREG_LCEIS1 0x0B4
156#define D40_DREG_LCEIS2 0x0B8
157#define D40_DREG_LCEIS3 0x0BC
158#define D40_DREG_STFU 0xFC8
159#define D40_DREG_ICFG 0xFCC
160#define D40_DREG_PERIPHID0 0xFE0
161#define D40_DREG_PERIPHID1 0xFE4
162#define D40_DREG_PERIPHID2 0xFE8
163#define D40_DREG_PERIPHID3 0xFEC
164#define D40_DREG_CELLID0 0xFF0
165#define D40_DREG_CELLID1 0xFF4
166#define D40_DREG_CELLID2 0xFF8
167#define D40_DREG_CELLID3 0xFFC
168
169/* LLI related structures */
170
171/**
172 * struct d40_phy_lli - The basic configuration register for each physical
173 * channel.
174 *
175 * @reg_cfg: The configuration register.
176 * @reg_elt: The element register.
177 * @reg_ptr: The pointer register.
178 * @reg_lnk: The link register.
179 *
180 * These registers are set up for both physical and logical transfers.
181 * Note that the bits in each register have different meanings in logical
182 * and physical (standard) mode.
183 *
184 * This struct must be 16-byte aligned, and only contain physical registers
185 * since it will be directly accessed by the DMA.
186 */
187struct d40_phy_lli {
188 u32 reg_cfg;
189 u32 reg_elt;
190 u32 reg_ptr;
191 u32 reg_lnk;
192};
193
194/**
195 * struct d40_phy_lli_bidir - struct for a transfer.
196 *
197 * @src: Register settings for src channel.
198 * @dst: Register settings for dst channel.
199 * @dst_addr: Physical destination address.
200 * @src_addr: Physical source address.
201 *
202 * All DMA transfers have a source and a destination.
203 */
204
205struct d40_phy_lli_bidir {
206 struct d40_phy_lli *src;
207 struct d40_phy_lli *dst;
208 dma_addr_t dst_addr;
209 dma_addr_t src_addr;
210};
211
212
213/**
214 * struct d40_log_lli - logical lli configuration
215 *
216 * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
217 * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
218 *
219 * This struct must be 8-byte aligned since it will be accessed directly by
220 * the DMA. Never add any non-hw-mapped registers to this struct.
221 */
222
223struct d40_log_lli {
224 u32 lcsp02;
225 u32 lcsp13;
226};
227
228/**
229 * struct d40_log_lli_bidir - For both src and dst
230 *
231 * @src: pointer to src lli configuration.
232 * @dst: pointer to dst lli configuration.
233 *
234 * You always have a src and a dst when doing DMA transfers.
235 */
236
237struct d40_log_lli_bidir {
238 struct d40_log_lli *src;
239 struct d40_log_lli *dst;
240};
241
242/**
243 * struct d40_log_lli_full - LCPA layout
244 *
245 * @lcsp0: Logical Channel Standard Param 0 - Src.
246 * @lcsp1: Logical Channel Standard Param 1 - Src.
247 * @lcsp2: Logical Channel Standard Param 2 - Dst.
248 * @lcsp3: Logical Channel Standard Param 3 - Dst.
249 *
250 * This struct maps to LCPA physical memory layout. Must map to
251 * the hw.
252 */
253struct d40_log_lli_full {
254 u32 lcsp0;
255 u32 lcsp1;
256 u32 lcsp2;
257 u32 lcsp3;
258};
259
260/**
261 * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings
262 *
263 * @lcsp3: The default configuration for dst.
264 * @lcsp1: The default configuration for src.
265 */
266struct d40_def_lcsp {
267 u32 lcsp3;
268 u32 lcsp1;
269};
270
271/**
272 * struct d40_lcla_elem - Info for one LCLA element.
273 *
274 * @src_id: logical channel src id
275 * @dst_id: logical channel dst id
276 * @src: LCPA formatted src parameters
277 * @dst: LCPA formatted dst parameters
278 *
279 */
280struct d40_lcla_elem {
281 int src_id;
282 int dst_id;
283 struct d40_log_lli *src;
284 struct d40_log_lli *dst;
285};
286
287/* Physical channels */
288
289void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
290 u32 *src_cfg, u32 *dst_cfg, bool is_log);
291
292void d40_log_cfg(struct stedma40_chan_cfg *cfg,
293		 u32 *lcsp1, u32 *lcsp3);
294
295int d40_phy_sg_to_lli(struct scatterlist *sg,
296 int sg_len,
297 dma_addr_t target,
298 struct d40_phy_lli *lli,
299 dma_addr_t lli_phys,
300 u32 reg_cfg,
301 u32 data_width,
302 int psize,
303 bool term_int);
304
305int d40_phy_fill_lli(struct d40_phy_lli *lli,
306 dma_addr_t data,
307 u32 data_size,
308 int psize,
309 dma_addr_t next_lli,
310 u32 reg_cfg,
311 bool term_int,
312 u32 data_width,
313 bool is_device);
314
315void d40_phy_lli_write(void __iomem *virtbase,
316 u32 phy_chan_num,
317 struct d40_phy_lli *lli_dst,
318 struct d40_phy_lli *lli_src);
319
320/* Logical channels */
321
322void d40_log_fill_lli(struct d40_log_lli *lli,
323 dma_addr_t data, u32 data_size,
324 u32 lli_next_off, u32 reg_cfg,
325 u32 data_width,
326 bool term_int, bool addr_inc);
327
328int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
329 struct scatterlist *sg,
330 int sg_len,
331 struct d40_log_lli_bidir *lli,
332 struct d40_def_lcsp *lcsp,
333 u32 src_data_width,
334 u32 dst_data_width,
335 enum dma_data_direction direction,
336 bool term_int, dma_addr_t dev_addr, int max_len,
337 int llis_per_log);
338
339void d40_log_lli_write(struct d40_log_lli_full *lcpa,
340 struct d40_log_lli *lcla_src,
341 struct d40_log_lli *lcla_dst,
342 struct d40_log_lli *lli_dst,
343 struct d40_log_lli *lli_src,
344 int llis_per_log);
345
346int d40_log_sg_to_lli(int lcla_id,
347 struct scatterlist *sg,
348 int sg_len,
349 struct d40_log_lli *lli_sg,
350 u32 lcsp13, /* src or dst*/
351 u32 data_width,
352 bool term_int, int max_len, int llis_per_log);
353
354#endif /* STE_DMA40_LL_H */