path: root/drivers/dma/coh901318_lli.c
author		Linus Walleij <linus.walleij@stericsson.com>	2009-11-19 13:49:17 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-11-20 01:45:19 -0500
commit		61f135b92f4758bc4d4767cd0a5d2da954e27f14 (patch)
tree		388fdc08150e2f8fcb2859f70ca67cdd86616f36 /drivers/dma/coh901318_lli.c
parent		b419148e567728f6af0c3b01965c1cc141e3e13a (diff)
Add COH 901 318 DMA block driver v5
This patch adds support for the ST-Ericsson COH 901 318 DMA block, found in
the U300 series platforms. It registers a DMA slave for device I/O and also a
memcpy slave for memcpy.

Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
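A minimal usage sketch of the LLI helpers added below (not part of the commit), assuming the struct coh901318_pool / struct coh901318_lli declarations and MAX_DMA_PACKET_SIZE from coh901318_lli.h and <mach/coh901318.h>; CTRL_CHAINED_EXAMPLE and CTRL_EOM_EXAMPLE are placeholder control words, not real register definitions:

/* Hypothetical caller, for illustration only */
static int coh901318_lli_example(struct coh901318_pool *pool, struct device *dev,
				 dma_addr_t buf, unsigned int size, dma_addr_t dev_addr)
{
	/* one linked item per MAX_DMA_PACKET_SIZE-sized piece of the buffer */
	unsigned int len = (size + MAX_DMA_PACKET_SIZE - 1) / MAX_DMA_PACKET_SIZE;
	struct coh901318_lli *head;
	int irqs;

	if (coh901318_pool_create(pool, dev, sizeof(struct coh901318_lli), 32))
		return -ENOMEM;

	head = coh901318_lli_alloc(pool, len);
	if (head == NULL)
		return -ENOMEM;

	/* program the chain for one memory-to-device transfer;
	 * the control words below are placeholders */
	irqs = coh901318_lli_fill_single(pool, head, buf, size, dev_addr,
					 CTRL_CHAINED_EXAMPLE, CTRL_EOM_EXAMPLE,
					 DMA_TO_DEVICE);

	/* here the chain would be handed to the hardware and awaited */

	coh901318_lli_free(pool, &head);
	return irqs < 0 ? irqs : 0;
}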
Diffstat (limited to 'drivers/dma/coh901318_lli.c')
-rw-r--r--	drivers/dma/coh901318_lli.c	318
1 file changed, 318 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
new file mode 100644
index 000000000000..f5120f238a4d
--- /dev/null
+++ b/drivers/dma/coh901318_lli.c
@@ -0,0 +1,318 @@
/*
 * drivers/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling LLIs (DMA linked list items)
 * Author: Per Friden <per.friden@stericsson.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/dmapool.h>
#include <linux/memory.h>
#include <mach/coh901318.h>

#include "coh901318_lli.h"

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif

static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
	if (data == NULL || data->link_addr == 0)
		return NULL;

	return (struct coh901318_lli *) data->virt_link_addr;
}

int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
	if (pool->dmapool == NULL)
		return -ENOMEM;

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}

int coh901318_pool_destroy(struct coh901318_pool *pool)
{
	dma_pool_destroy(pool->dmapool);
	return 0;
}

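/*
 * Allocate a chain of 'len' items from the pool, link them together through
 * link_addr/virt_link_addr and return the head, or NULL on failure.
 */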
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
	int i;
	struct coh901318_lli *head;
	struct coh901318_lli *lli;
	struct coh901318_lli *lli_prev;
	dma_addr_t phy;

	if (len == 0)
		return NULL;

	spin_lock(&pool->lock);

	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

	if (head == NULL)
		goto err;

	DEBUGFS_POOL_COUNTER_ADD(pool, 1);

	lli = head;
	lli->phy_this = phy;

	for (i = 1; i < len; i++) {
		lli_prev = lli;

		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

		if (lli == NULL)
			goto err_clean_up;

		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
		lli->phy_this = phy;

		lli_prev->link_addr = phy;
		lli_prev->virt_link_addr = lli;
	}

	lli->link_addr = 0x00000000U;

	spin_unlock(&pool->lock);

	return head;

 err:
	spin_unlock(&pool->lock);
	return NULL;

 err_clean_up:
	lli_prev->link_addr = 0x00000000U;
	spin_unlock(&pool->lock);
	coh901318_lli_free(pool, &head);
	return NULL;
}

void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli)
{
	struct coh901318_lli *l;
	struct coh901318_lli *next;

	if (lli == NULL)
		return;

	l = *lli;

	if (l == NULL)
		return;

	spin_lock(&pool->lock);

	while (l->link_addr) {
		next = l->virt_link_addr;
		dma_pool_free(pool->dmapool, l, l->phy_this);
		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
		l = next;
	}
	dma_pool_free(pool->dmapool, l, l->phy_this);
	DEBUGFS_POOL_COUNTER_ADD(pool, -1);

	spin_unlock(&pool->lock);
	*lli = NULL;
}

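/*
 * Fill an already-allocated chain for a memory-to-memory transfer: every
 * item but the last moves MAX_DMA_PACKET_SIZE bytes; the last one carries
 * the remainder together with the end-of-message control bits.
 */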
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t source, unsigned int size,
			  dma_addr_t destination, u32 ctrl_chained,
			  u32 ctrl_eom)
{
	int s = size;
	dma_addr_t src = source;
	dma_addr_t dst = destination;

	lli->src_addr = src;
	lli->dst_addr = dst;

	while (lli->link_addr) {
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= MAX_DMA_PACKET_SIZE;
		lli = coh901318_lli_next(lli);

		src += MAX_DMA_PACKET_SIZE;
		dst += MAX_DMA_PACKET_SIZE;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	/* One irq per single transfer */
	return 1;
}

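/*
 * Fill a chain for a single contiguous buffer going to or coming from a
 * fixed device address; only the memory-side address is advanced between
 * items.
 */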
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
			  enum dma_data_direction dir)
{
	int s = size;
	dma_addr_t src;
	dma_addr_t dst;

	if (dir == DMA_TO_DEVICE) {
		src = buf;
		dst = dev_addr;
	} else if (dir == DMA_FROM_DEVICE) {
		src = dev_addr;
		dst = buf;
	} else {
		return -EINVAL;
	}

	while (lli->link_addr) {
		size_t block_size = MAX_DMA_PACKET_SIZE;

		/* If we are on the next-to-final block and there will
		 * be less than half a DMA packet left for the last
		 * block, then we want to make this block a little
		 * smaller to balance the sizes. This is meant to
		 * avoid too small transfers if the buffer size is
		 * (MAX_DMA_PACKET_SIZE*N + 1). */
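		/* Illustration with hypothetical numbers: if
		 * MAX_DMA_PACKET_SIZE were 2048 and size were 4097, the
		 * three blocks become 2048 + 1024 + 1025 instead of
		 * 2048 + 2048 + 1, so the tail transfer is never tiny. */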
		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
			block_size = MAX_DMA_PACKET_SIZE/2;

		lli->control = ctrl_chained | block_size;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= block_size;
		lli = coh901318_lli_next(lli);

		if (dir == DMA_TO_DEVICE)
			src += block_size;
		else if (dir == DMA_FROM_DEVICE)
			dst += block_size;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	/* One irq per single transfer */
	return 1;
}

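/*
 * Fill a chain from a mapped scatterlist: each sg-element is split into
 * MAX_DMA_PACKET_SIZE-sized pieces, intermediate pieces use ctrl_chained and
 * the last piece of each element gets ctrl/ctrl_last. Returns the number of
 * interrupts the transfer will raise, judged against ctrl_irq_mask.
 */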
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sgl, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
		      u32 ctrl_last,
		      enum dma_data_direction dir, u32 ctrl_irq_mask)
{
	int i;
	struct scatterlist *sg;
	u32 ctrl_sg;
	dma_addr_t src = 0;
	dma_addr_t dst = 0;
	int nbr_of_irq = 0;
	u32 bytes_to_transfer;
	u32 elem_size;

	if (lli == NULL)
		return -EINVAL;

	spin_lock(&pool->lock);

	if (dir == DMA_TO_DEVICE)
		dst = dev_addr;
	else if (dir == DMA_FROM_DEVICE)
		src = dev_addr;
	else
		goto err;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_chain(sg)) {
			/* sg continues to the next sg-element; don't
			 * set the final control word until the last
			 * sg-element in the chain
			 */
			ctrl_sg = ctrl_chained;
		} else if (i == nents - 1) {
			ctrl_sg = ctrl_last;
		} else {
			ctrl_sg = ctrl ? ctrl : ctrl_last;
		}

		if (ctrl_sg & ctrl_irq_mask)
			nbr_of_irq++;

		if (dir == DMA_TO_DEVICE)
			/* memory is the source for this sg-element */
			src = sg_dma_address(sg);
		else
			/* memory is the destination for this sg-element */
			dst = sg_dma_address(sg);

		bytes_to_transfer = sg_dma_len(sg);

		while (bytes_to_transfer) {
			u32 val;

			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
				elem_size = MAX_DMA_PACKET_SIZE;
				val = ctrl_chained;
			} else {
				elem_size = bytes_to_transfer;
				val = ctrl_sg;
			}

			lli->control = val | elem_size;
			lli->src_addr = src;
			lli->dst_addr = dst;

			if (dir == DMA_FROM_DEVICE)
				dst += elem_size;
			else
				src += elem_size;

			BUG_ON(lli->link_addr & 3);

			bytes_to_transfer -= elem_size;
			lli = coh901318_lli_next(lli);
		}
	}
	spin_unlock(&pool->lock);

	/* There can be many IRQs per sg transfer */
	return nbr_of_irq;
 err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}