Diffstat (limited to 'drivers/dma/timb_dma.c')
 -rw-r--r--  drivers/dma/timb_dma.c  853
 1 files changed, 853 insertions, 0 deletions
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
new file mode 100644
index 000000000000..4dd710246c79
--- /dev/null
+++ b/drivers/dma/timb_dma.c
@@ -0,0 +1,853 @@
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <linux/timb_dma.h>
#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instance base addresses are 0x00, 0x40, 0x80 ...
 * TX instance base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18
/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* Bytes per row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14


#define TIMB_DMA_DESC_SIZE	8
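
/* Layout of one 8 byte hardware descriptor, as td_fill_desc() builds it:
 * byte 0: control; 0x01 = valid, 0x02 = last in chain, 0x20 = transfer
 * byte 1: reserved (written as zero)
 * bytes 2-3: transfer length, little endian
 * bytes 4-7: bus address, little endian
 */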

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used for mutual exclusion */
	dma_cookie_t		last_completed_cookie;
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_data_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

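/*
 * The channels[] array is allocated in one chunk directly behind
 * struct timb_dma (see the kzalloc() in td_probe()), so the owning
 * struct timb_dma can be recovered from a channel pointer by stepping
 * back over the preceding channels and the struct timb_dma itself.
 */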
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
}

static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHORT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
		dma_desc, (void *)(int)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_FROM_DEVICE) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_FROM_DEVICE)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	td_chan->last_completed_cookie = txd->cookie;
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

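/* Build the interrupt enable mask: one bit per channel that has an
 * ongoing transfer whose current descriptor asked for an interrupt.
 */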
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

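/* Assign the next cookie (wrapping back to 1 on signed overflow) and
 * either start the descriptor immediately or put it on the queue.
 */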
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		return NULL;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);

	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	td_chan->last_completed_cookie = 1;
	chan->cookie = 1;
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	last_complete = td_chan->last_completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	dev_dbg(chan2dev(chan),
		"%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
		__func__, ret, last_complete, last_used);

	return ret;
}

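/* Besides starting queued work, this doubles as a poll point: descriptors
 * prepared without DMA_PREP_INTERRUPT never raise an IRQ, so their
 * completion is detected here via the done flag in the ISR.
 */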
static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage + TIMB_DMA_DESC_SIZE > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}

static void td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);
}

static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

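/* The hard interrupt handler only masks further DMA interrupts and defers
 * all handling to td_tasklet(), which acks the sources and re-enables the
 * interrupt mask when it is done.
 */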
static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int __devinit td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_is_tx_complete = td_is_tx_complete;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		td_chan->chan.cookie = 1;
		td_chan->chan.chan_id = i;
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int __devexit td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}
static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= __devexit_p(td_remove),
};

static int __init td_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_init);

static void __exit td_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);
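
For reference, a minimal client-side sketch of how a peripheral driver might drive one of these channels through the dmaengine API of this kernel generation (dma_request_channel() with a filter function, the slave-sg prep called through the dma_device ops). The filter function, the channel number and the pre-mapped scatterlist are hypothetical and error handling is trimmed; this is an illustration, not part of the commit:

static bool td_client_filter(struct dma_chan *chan, void *param)
{
	/* hypothetical: match the channel id wired up in the platform data */
	return chan->chan_id == (int)(unsigned long)param;
}

/* sgl is assumed to be already mapped with dma_map_sg() for DMA_FROM_DEVICE */
static int td_client_start_rx(struct scatterlist *sgl, unsigned int sg_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *txd;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* even channel ids are RX; ask for channel 0 */
	chan = dma_request_channel(mask, td_client_filter, (void *)0);
	if (!chan)
		return -ENODEV;

	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
		DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	txd->tx_submit(txd);			/* queues via td_tx_submit() */
	chan->device->device_issue_pending(chan);

	return 0;
}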