author    Nicolas Ferre <nicolas.ferre@atmel.com>  2009-07-03 13:24:33 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-07-23 01:41:27 -0400
commit    dc78baa2b90b289590911b40b6800f77d0dc935a (patch)
tree      db54dedb1e13a413190ad637ccaf6f5557dc9c10 /drivers/dma/at_hdmac.c
parent    f1aef8b6e6abf32a3a269542f95a19e2cb319f6c (diff)
dmaengine: at_hdmac: new driver for the Atmel AHB DMA Controller
This AHB DMA Controller (aka HDMA or DMAC on AT91 systems) is available on the at91sam9rl chip. It will be used on other products in the future.

This first release covers only the memory-to-memory transfer type, which is the only transfer type supported by this chip. On other products, it will also be used for peripheral DMA transfers (slave API support to come). I used the dmatest client without problem in different configurations to test it.

Full documentation for this controller can be found in the SAM9RL datasheet:
http://www.atmel.com/dyn/products/product_card.asp?part_id=4243

Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
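For context, a memory-to-memory transfer through this driver goes through the generic dmaengine client API rather than any driver-specific entry point, which is how the dmatest module mentioned above exercises it. The following is a minimal, hypothetical client sketch against the circa-2009 dmaengine API (request a channel advertising DMA_MEMCPY, prepare a memcpy descriptor, submit it, issue pending, poll for completion); the function name and buffer handling are illustrative assumptions, not part of this patch.

/*
 * Hypothetical dmaengine memcpy client sketch (not part of this patch).
 * src and dst are assumed to be already-mapped bus addresses.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* take any channel advertising memcpy capability */
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        /* for this driver, this lands in atc_prep_dma_memcpy() */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_CTRL_ACK);
        if (!tx) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        cookie = tx->tx_submit(tx);             /* atc_tx_submit() */
        chan->device->device_issue_pending(chan);

        /* poll for completion; atc_is_tx_complete() backs this call */
        while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
                        != DMA_SUCCESS)
                cpu_relax();

        dma_release_channel(chan);
        return 0;
}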
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r--   drivers/dma/at_hdmac.c   1009
1 file changed, 1009 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
new file mode 100644
index 000000000000..64dbf0ce128e
--- /dev/null
+++ b/drivers/dma/at_hdmac.c
@@ -0,0 +1,1009 @@
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac         : Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma  : ATmel DMA controller entity related
 * atc_ / atchan    : ATmel DMA Channel entity related
 */

#define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA       (0)
#define ATC_DEFAULT_CTRLB       (ATC_SIF(0) | ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->active_list,
                                struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->queue,
                                struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to the client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
                                            gfp_t gfp_flags)
{
        struct at_desc *desc = NULL;
        struct at_dma *atdma = to_at_dma(chan->device);
        dma_addr_t phys;

        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = atc_tx_submit;
                desc->txd.phys = phys;
        }

        return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *ret = NULL;
        unsigned int i = 0;
        LIST_HEAD(tmp_list);

        spin_lock_bh(&atchan->lock);
        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&atchan->chan_common),
                        "desc %p not ACKed\n", desc);
        }
        spin_unlock_bh(&atchan->lock);
        dev_vdbg(chan2dev(&atchan->chan_common),
                 "scanned %u descriptors on freelist\n", i);

        /* no more descriptors available in initial pool: create one more */
        if (!ret) {
                ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                if (ret) {
                        spin_lock_bh(&atchan->lock);
                        atchan->descs_allocated++;
                        spin_unlock_bh(&atchan->lock);
                } else {
                        dev_err(chan2dev(&atchan->chan_common),
                                "not enough descriptors available\n");
                }
        }

        return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
        if (desc) {
                struct at_desc *child;

                spin_lock_bh(&atchan->lock);
                list_for_each_entry(child, &desc->txd.tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                 "moving child desc %p to freelist\n",
                                 child);
                list_splice_init(&desc->txd.tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
                spin_unlock_bh(&atchan->lock);
        }
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
        dma_cookie_t cookie = atchan->chan_common.cookie;

        if (++cookie < 0)
                cookie = 1;

        atchan->chan_common.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
        struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

        /* ASSERT: channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&atchan->chan_common),
                        "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
                        channel_readl(atchan, SADDR),
                        channel_readl(atchan, DADDR),
                        channel_readl(atchan, CTRLA),
                        channel_readl(atchan, CTRLB),
                        channel_readl(atchan, DSCR));

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        vdbg_dump_regs(atchan);

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
        channel_writel(atchan, DSCR, first->txd.phys);
        dma_writel(atdma, CHER, atchan->mask);

        vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
        dma_async_tx_callback callback;
        void *param;
        struct dma_async_tx_descriptor *txd = &desc->txd;

        dev_vdbg(chan2dev(&atchan->chan_common),
                 "descriptor %u complete\n", txd->cookie);

        atchan->completed_cookie = txd->cookie;
        callback = txd->callback;
        param = txd->callback_param;

        /* move children to free_list */
        list_splice_init(&txd->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);

        /* unmap dma addresses */
        if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                        dma_unmap_single(chan2parent(&atchan->chan_common),
                                         desc->lli.daddr,
                                         desc->len, DMA_FROM_DEVICE);
                else
                        dma_unmap_page(chan2parent(&atchan->chan_common),
                                       desc->lli.daddr,
                                       desc->len, DMA_FROM_DEVICE);
        }
        if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                        dma_unmap_single(chan2parent(&atchan->chan_common),
                                         desc->lli.saddr,
                                         desc->len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(chan2parent(&atchan->chan_common),
                                       desc->lli.saddr,
                                       desc->len, DMA_TO_DEVICE);
        }

        /*
         * The API requires that no submissions are done from a
         * callback, so we don't need to drop the lock here
         */
        if (callback)
                callback(param);

        dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submits queued descriptors, if any are pending.
 *
 * Assumes the channel is idle while this function is called.
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

        BUG_ON(atc_chan_is_enabled(atchan));

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        if (!list_empty(&atchan->queue))
                atc_dostart(atchan, atc_first_queued(atchan));
        /* empty active_list now that it is completed */
        list_splice_init(&atchan->active_list, &list);
        /* empty queue list by moving descriptors (if any) to active_list */
        list_splice_init(&atchan->queue, &atchan->active_list);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *child;

        dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

        list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
                if (!(desc->lli.ctrla & ATC_DONE))
                        /* This one is currently in progress */
                        return;

                list_for_each_entry(child, &desc->txd.tx_list, desc_node)
                        if (!(child->lli.ctrla & ATC_DONE))
                                /* Currently in progress */
                                return;

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this chain must be done.
                 */
                atc_chain_complete(atchan, desc);
        }
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
        } else {
                atc_chain_complete(atchan, atc_first_active(atchan));
                /* advance work */
                atc_dostart(atchan, atc_first_active(atchan));
        }
}


/**
 * atc_handle_error - handle errors reported by the DMA controller
 * @atchan: channel where the error occurred
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
        struct at_desc *bad_desc;
        struct at_desc *child;

        /*
         * The descriptor currently at the head of the active list is
         * broken. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = atc_first_active(atchan);
        list_del_init(&bad_desc->desc_node);

        /* As we are stopped, take advantage to push queued descriptors
         * in active_list */
        list_splice_init(&atchan->queue, atchan->active_list.prev);

        /* Try to restart the controller */
        if (!list_empty(&atchan->active_list))
                atc_dostart(atchan, atc_first_active(atchan));

        /*
         * KERN_CRITICAL may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_crit(chan2dev(&atchan->chan_common),
                 "Bad descriptor submitted for DMA!\n");
        dev_crit(chan2dev(&atchan->chan_common),
                 "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);

        /* Pretend the descriptor completed successfully */
        atc_chain_complete(atchan, bad_desc);
}


/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
        struct at_dma_chan *atchan = (struct at_dma_chan *)data;

        /* Channel cannot be enabled here */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: channel enabled in tasklet\n");
                return;
        }

        spin_lock(&atchan->lock);
        if (test_and_clear_bit(0, &atchan->error_status))
                atc_handle_error(atchan);
        else
                atc_advance_work(atchan);

        spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
        struct at_dma *atdma = (struct at_dma *)dev_id;
        struct at_dma_chan *atchan;
        int i;
        u32 status, pending, imr;
        int ret = IRQ_NONE;

        do {
                imr = dma_readl(atdma, EBCIMR);
                status = dma_readl(atdma, EBCISR);
                pending = status & imr;

                if (!pending)
                        break;

                dev_vdbg(atdma->dma_common.dev,
                         "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
                         status, imr, pending);

                for (i = 0; i < atdma->dma_common.chancnt; i++) {
                        atchan = &atdma->chan[i];
                        if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
                                if (pending & AT_DMA_ERR(i)) {
                                        /* Disable channel on AHB error */
                                        dma_writel(atdma, CHDR, atchan->mask);
                                        /* Give information to tasklet */
                                        set_bit(0, &atchan->error_status);
                                }
                                tasklet_schedule(&atchan->tasklet);
                                ret = IRQ_HANDLED;
                        }
                }

        } while (pending);

        return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queues the chain if the DMA engine is already working.
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_desc *desc = txd_to_at_desc(tx);
        struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&atchan->lock);
        cookie = atc_assign_cookie(atchan, desc);

        if (list_empty(&atchan->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                         desc->txd.cookie);
                atc_dostart(atchan, desc);
                list_add_tail(&desc->desc_node, &atchan->active_list);
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                         desc->txd.cookie);
                list_add_tail(&desc->desc_node, &atchan->queue);
        }

        spin_unlock_bh(&atchan->lock);

        return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation bus (DMA) destination address
 * @src: operation bus (DMA) source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                    size_t len, unsigned long flags)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        struct at_desc *desc = NULL;
        struct at_desc *first = NULL;
        struct at_desc *prev = NULL;
        size_t xfer_count;
        size_t offset;
        unsigned int src_width;
        unsigned int dst_width;
        u32 ctrla;
        u32 ctrlb;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
                 dest, src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        ctrla = ATC_DEFAULT_CTRLA;
        ctrlb = ATC_DEFAULT_CTRLB
                | ATC_SRC_ADDR_MODE_INCR
                | ATC_DST_ADDR_MODE_INCR
                | ATC_FC_MEM2MEM;

        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!((src | dest | len) & 3)) {
                ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
                src_width = dst_width = 2;
        } else if (!((src | dest | len) & 1)) {
                ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
                src_width = dst_width = 1;
        } else {
                ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
                src_width = dst_width = 0;
        }
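
        /*
         * Worked example of the width selection above (editor's note,
         * values are illustrative): src = 0x20001000, dest = 0x20002000,
         * len = 0x100 -- all three are multiples of 4, so
         * (src | dest | len) & 3 == 0 and the transfer uses 32-bit
         * accesses with src_width = dst_width = 2. The widths are log2
         * of the access size in bytes, which is why the loop below
         * advances by "xfer_count << src_width" bytes per descriptor.
         */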

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                   ATC_BTSIZE_MAX);

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                desc->lli.saddr = src + offset;
                desc->lli.daddr = dest + offset;
                desc->lli.ctrla = ctrla | xfer_count;
                desc->lli.ctrlb = ctrlb;

                desc->txd.cookie = 0;
                async_tx_ack(&desc->txd);

                if (!first) {
                        first = desc;
                } else {
                        /* inform the HW lli about chaining */
                        prev->lli.dscr = desc->txd.phys;
                        /* insert the link descriptor into the LD ring */
                        list_add_tail(&desc->desc_node,
                                      &first->txd.tx_list);
                }
                prev = desc;
        }

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = len;

        /* set end-of-link to the last link descriptor of the list */
        set_desc_eol(desc);

        desc->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        atc_desc_put(atchan, first);
        return NULL;
}

/**
 * atc_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 *
 * If @done and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_is_tx_complete(struct dma_chan *chan,
                   dma_cookie_t cookie,
                   dma_cookie_t *done, dma_cookie_t *used)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
                 cookie, done ? *done : 0, used ? *used : 0);

        spin_lock_bh(&atchan->lock);

        last_complete = atchan->completed_cookie;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
                atc_cleanup_descriptors(atchan);

                last_complete = atchan->completed_cookie;
                last_used = chan->cookie;

                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }

        spin_unlock_bh(&atchan->lock);

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);

        dev_vdbg(chan2dev(chan), "issue_pending\n");

        if (!atc_chan_is_enabled(atchan)) {
                spin_lock_bh(&atchan->lock);
                atc_advance_work(atchan);
                spin_unlock_bh(&atchan->lock);
        }
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        struct at_dma *atdma = to_at_dma(chan->device);
        struct at_desc *desc;
        int i;
        LIST_HEAD(tmp_list);

        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

        /* ASSERT: channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
                return -EIO;
        }

        /* have we already been set up? */
        if (!list_empty(&atchan->free_list))
                return atchan->descs_allocated;

        /* Allocate initial pool of descriptors */
        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = atc_alloc_descriptor(chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(atdma->dma_common.dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_bh(&atchan->lock);
        atchan->descs_allocated = i;
        list_splice(&tmp_list, &atchan->free_list);
        atchan->completed_cookie = chan->cookie = 1;
        spin_unlock_bh(&atchan->lock);

        /* channel parameters */
        channel_writel(atchan, CFG, ATC_DEFAULT_CFG);

        dev_dbg(chan2dev(chan),
                "alloc_chan_resources: allocated %d descriptors\n",
                atchan->descs_allocated);

        return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        struct at_dma *atdma = to_at_dma(chan->device);
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
                atchan->descs_allocated);

        /* ASSERT: channel is idle */
        BUG_ON(!list_empty(&atchan->active_list));
        BUG_ON(!list_empty(&atchan->queue));
        BUG_ON(atc_chan_is_enabled(atchan));

        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
                list_del(&desc->desc_node);
                /* free link descriptor */
                dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
        }
        list_splice_init(&atchan->free_list, &list);
        atchan->descs_allocated = 0;

        dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
        dma_writel(atdma, EN, 0);

        /* disable all interrupts */
        dma_writel(atdma, EBCIDR, -1L);

        /* confirm that all channels are disabled */
        while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
                cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
        struct at_dma_platform_data *pdata;
        struct resource *io;
        struct at_dma *atdma;
        size_t size;
        int irq;
        int err;
        int i;

        /* get DMA Controller parameters from platform */
        pdata = pdev->dev.platform_data;
        if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
                return -EINVAL;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        size = sizeof(struct at_dma);
        size += pdata->nr_channels * sizeof(struct at_dma_chan);
        atdma = kzalloc(size, GFP_KERNEL);
        if (!atdma)
                return -ENOMEM;

        /* discover transaction capabilities from the platform data */
        atdma->dma_common.cap_mask = pdata->cap_mask;
        atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

        size = io->end - io->start + 1;
        if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
                err = -EBUSY;
                goto err_kfree;
        }

        atdma->regs = ioremap(io->start, size);
        if (!atdma->regs) {
                err = -ENOMEM;
                goto err_release_r;
        }

        atdma->clk = clk_get(&pdev->dev, "dma_clk");
        if (IS_ERR(atdma->clk)) {
                err = PTR_ERR(atdma->clk);
                goto err_clk;
        }
        clk_enable(atdma->clk);

        /* force dma off, just in case */
        at_dma_off(atdma);

        err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
        if (err)
                goto err_irq;

        platform_set_drvdata(pdev, atdma);

        /* create a pool of consistent memory blocks for hardware descriptors */
        atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
                        &pdev->dev, sizeof(struct at_desc),
                        4 /* word alignment */, 0);
        if (!atdma->dma_desc_pool) {
                dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
                err = -ENOMEM;
                goto err_pool_create;
        }

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        /* initialize channel-related values */
        INIT_LIST_HEAD(&atdma->dma_common.channels);
        for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
                struct at_dma_chan *atchan = &atdma->chan[i];

                atchan->chan_common.device = &atdma->dma_common;
                atchan->chan_common.cookie = atchan->completed_cookie = 1;
                atchan->chan_common.chan_id = i;
                list_add_tail(&atchan->chan_common.device_node,
                              &atdma->dma_common.channels);

                atchan->ch_regs = atdma->regs + ch_regs(i);
                spin_lock_init(&atchan->lock);
                atchan->mask = 1 << i;

                INIT_LIST_HEAD(&atchan->active_list);
                INIT_LIST_HEAD(&atchan->queue);
                INIT_LIST_HEAD(&atchan->free_list);

                tasklet_init(&atchan->tasklet, atc_tasklet,
                             (unsigned long)atchan);
                atc_enable_irq(atchan);
        }

        /* set base routines */
        atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
        atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
        atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
        atdma->dma_common.device_issue_pending = atc_issue_pending;
        atdma->dma_common.dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
                atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

        dma_writel(atdma, EN, AT_DMA_ENABLE);

        dev_info(&pdev->dev, "Atmel AHB DMA Controller (%s%s), %d channels\n",
                 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
                 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
                 atdma->dma_common.chancnt);

        dma_async_device_register(&atdma->dma_common);

        return 0;

err_pool_create:
        platform_set_drvdata(pdev, NULL);
        free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
        clk_disable(atdma->clk);
        clk_put(atdma->clk);
err_clk:
        iounmap(atdma->regs);
        atdma->regs = NULL;
err_release_r:
        release_mem_region(io->start, size);
err_kfree:
        kfree(atdma);
        return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
        struct at_dma *atdma = platform_get_drvdata(pdev);
        struct dma_chan *chan, *_chan;
        struct resource *io;

        at_dma_off(atdma);
        dma_async_device_unregister(&atdma->dma_common);

        dma_pool_destroy(atdma->dma_desc_pool);
        platform_set_drvdata(pdev, NULL);
        free_irq(platform_get_irq(pdev, 0), atdma);

        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
                                 device_node) {
                struct at_dma_chan *atchan = to_at_dma_chan(chan);

                /* Disable interrupts */
                atc_disable_irq(atchan);
                tasklet_disable(&atchan->tasklet);

                tasklet_kill(&atchan->tasklet);
                list_del(&chan->device_node);
        }

        clk_disable(atdma->clk);
        clk_put(atdma->clk);

        iounmap(atdma->regs);
        atdma->regs = NULL;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(io->start, io->end - io->start + 1);

        kfree(atdma);

        return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
        struct at_dma *atdma = platform_get_drvdata(pdev);

        at_dma_off(platform_get_drvdata(pdev));
        clk_disable(atdma->clk);
}

static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
{
        struct at_dma *atdma = platform_get_drvdata(pdev);

        at_dma_off(platform_get_drvdata(pdev));
        clk_disable(atdma->clk);
        return 0;
}

static int at_dma_resume_early(struct platform_device *pdev)
{
        struct at_dma *atdma = platform_get_drvdata(pdev);

        clk_enable(atdma->clk);
        dma_writel(atdma, EN, AT_DMA_ENABLE);
        return 0;
}

static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
        .suspend_late   = at_dma_suspend_late,
        .resume_early   = at_dma_resume_early,
        .driver = {
                .name   = "at_hdmac",
        },
};

static int __init at_dma_init(void)
{
        return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
module_init(at_dma_init);

static void __exit at_dma_exit(void)
{
        platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");
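
Since at_dma_probe() above consumes a struct at_dma_platform_data plus one memory resource and one IRQ, a board file has to provide them before this platform driver can bind. Below is a minimal sketch of such a registration; the base address and IRQ macros (AT91SAM9RL_BASE_DMA, AT91SAM9RL_ID_DMA), the 512-byte register window and the 2-channel count are assumptions for illustration, not values taken from this patch.

/* Hypothetical board-file sketch; addresses, IRQ and channel count are
 * assumed for illustration only. */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static struct at_dma_platform_data at_hdmac_pdata = {
        .nr_channels    = 2,
};

static struct resource at_hdmac_resources[] = {
        {
                .start  = AT91SAM9RL_BASE_DMA,          /* assumed SoC macro */
                .end    = AT91SAM9RL_BASE_DMA + 0x1ff,  /* assumed 512-byte window */
                .flags  = IORESOURCE_MEM,
        }, {
                .start  = AT91SAM9RL_ID_DMA,            /* assumed SoC macro */
                .end    = AT91SAM9RL_ID_DMA,
                .flags  = IORESOURCE_IRQ,
        },
};

static struct platform_device at_hdmac_device = {
        .name           = "at_hdmac",   /* must match the driver name above */
        .id             = -1,
        .dev            = {
                .platform_data  = &at_hdmac_pdata,
        },
        .resource       = at_hdmac_resources,
        .num_resources  = ARRAY_SIZE(at_hdmac_resources),
};

static int __init board_add_hdmac(void)
{
        /* advertise memory-to-memory capability, as checked in probe */
        dma_cap_set(DMA_MEMCPY, at_hdmac_pdata.cap_mask);
        return platform_device_register(&at_hdmac_device);
}

With this in place, probe copies pdata->cap_mask into the dma_device, so the DMA_MEMCPY bit set here is what makes probe install atc_prep_dma_memcpy as the prep routine.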