author	Nicolas Ferre <nicolas.ferre@atmel.com>	2009-07-03 13:24:33 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-07-23 01:41:27 -0400
commit	dc78baa2b90b289590911b40b6800f77d0dc935a
tree	db54dedb1e13a413190ad637ccaf6f5557dc9c10
parent	f1aef8b6e6abf32a3a269542f95a19e2cb319f6c
dmaengine: at_hdmac: new driver for the Atmel AHB DMA Controller
This AHB DMA Controller (aka HDMA or DMAC on AT91 systems) is available
on the at91sam9rl chip. It will be used on other products in the future.

This first release covers only the memory-to-memory transfer type, which
is the only transfer type supported by this chip. On other products it
will also be used for peripheral DMA transfers (slave API support to
come). I used the dmatest client without problem, in different
configurations, to test it.

Full documentation for this controller can be found in the SAM9RL
datasheet:
http://www.atmel.com/dyn/products/product_card.asp?part_id=4243

Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
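For reference, a client drives this controller purely through the generic
dmaengine API. The sketch below is editorial and not part of the patch
(the function name example_memcpy_xfer is hypothetical, and error
handling/unmapping are simplified); assuming <linux/dmaengine.h> and
<linux/dma-mapping.h>, it shows roughly what dmatest does for one
memory-to-memory transfer:

	static int example_memcpy_xfer(struct device *dev,
				       void *dst, void *src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_addr_t dma_src, dma_dst;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;
		int ret = 0;

		/* ask dmaengine for any channel advertising DMA_MEMCPY */
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
		dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

		/* builds the LLI chain (atc_prep_dma_memcpy); without
		 * DMA_COMPL_SKIP_* flags the driver unmaps on completion */
		tx = chan->device->device_prep_dma_memcpy(chan, dma_dst,
							dma_src, len,
							DMA_CTRL_ACK);
		if (!tx) {
			ret = -ENOMEM;
			goto out;
		}

		cookie = tx->tx_submit(tx);	/* atc_tx_submit() */
		dma_async_issue_pending(chan);	/* atc_issue_pending() */

		/* polls atc_is_tx_complete(); a real client would sleep */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
				!= DMA_SUCCESS)
			cpu_relax();
	out:
		dma_release_channel(chan);
		return ret;
	}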
-rw-r--r--	arch/arm/mach-at91/include/mach/at_hdmac.h	  26
-rw-r--r--	drivers/dma/Kconfig	   8
-rw-r--r--	drivers/dma/Makefile	   1
-rw-r--r--	drivers/dma/at_hdmac.c	1009
-rw-r--r--	drivers/dma/at_hdmac_regs.h	 386
5 files changed, 1430 insertions, 0 deletions
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
new file mode 100644
index 000000000000..21a5554f9cb8
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -0,0 +1,26 @@
1/*
2 * Header file for the Atmel AHB DMA Controller driver
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef AT_HDMAC_H
12#define AT_HDMAC_H
13
14#include <linux/dmaengine.h>
15
16/**
17 * struct at_dma_platform_data - Controller configuration parameters
18 * @nr_channels: Number of channels supported by hardware (max 8)
19 * @cap_mask: dma_capability flags supported by the platform
20 */
21struct at_dma_platform_data {
22 unsigned int nr_channels;
23 dma_cap_mask_t cap_mask;
24};
25
26#endif /* AT_HDMAC_H */
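
An editorial usage sketch, not part of this patch: a board file passes
the platform data above to a platform device named "at_hdmac" (the name
the driver binds to). The base address, IRQ number and channel count
below are placeholders, not verified AT91SAM9RL values, and the board
clock tree must also provide the "dma_clk" that the driver clk_get()s:

	static struct at_dma_platform_data atdma_pdata = {
		.nr_channels	= 2,	/* hardware supports up to 8 */
	};

	static struct resource atdma_resources[] = {
		{
			.start	= 0xffffe600,	/* placeholder base */
			.end	= 0xffffe7ff,
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= 21,		/* placeholder IRQ */
			.end	= 21,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device at_hdmac_device = {
		.name		= "at_hdmac",
		.id		= -1,
		.dev		= {
			.platform_data	= &atdma_pdata,
		},
		.resource	= atdma_resources,
		.num_resources	= ARRAY_SIZE(atdma_resources),
	};

	static void __init board_add_hdmac(void)
	{
		/* advertise the only capability this first release supports */
		dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
		platform_device_register(&at_hdmac_device);
	}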
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index babf214a509b..bc8fb41cd623 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,6 +46,14 @@ config DW_DMAC
46	  Support the Synopsys DesignWare AHB DMA controller. This
47	  can be integrated in chips such as the Atmel AT32ap7000.
48
49config AT_HDMAC
50 tristate "Atmel AHB DMA support"
51 depends on ARCH_AT91SAM9RL
52 select DMA_ENGINE
53 help
54 Support the Atmel AHB DMA controller. This can be integrated in
55 chips such as the Atmel AT91SAM9RL.
56
57config FSL_DMA
58	tristate "Freescale Elo and Elo Plus DMA support"
59	depends on FSL_SOC
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2e5dc96700d2..d7bc5fd17d84 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
7obj-$(CONFIG_FSL_DMA) += fsldma.o
8obj-$(CONFIG_MV_XOR) += mv_xor.o
9obj-$(CONFIG_DW_DMAC) += dw_dmac.o
10obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
11obj-$(CONFIG_MX3_IPU) += ipu/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
new file mode 100644
index 000000000000..64dbf0ce128e
--- /dev/null
+++ b/drivers/dma/at_hdmac.c
@@ -0,0 +1,1009 @@
1/*
2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 *
12 * This supports the Atmel AHB DMA Controller.
13 *
14 * The driver has currently been tested with the Atmel AT91SAM9RL
15 * and AT91SAM9G45 series.
16 */
17
18#include <linux/clk.h>
19#include <linux/dmaengine.h>
20#include <linux/dma-mapping.h>
21#include <linux/dmapool.h>
22#include <linux/interrupt.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include "at_hdmac_regs.h"
27
28/*
29 * Glossary
30 * --------
31 *
32 * at_hdmac : Name of the Atmel AHB DMA Controller
33 * at_dma_ / atdma : Atmel DMA controller entity related
34 * atc_ / atchan : Atmel DMA Channel entity related
35 */
36
37#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
38#define ATC_DEFAULT_CTRLA (0)
39#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
40 |ATC_DIF(1))
41
42/*
43 * Initial number of descriptors to allocate for each channel. This could
44 * be increased during dma usage.
45 */
46static unsigned int init_nr_desc_per_channel = 64;
47module_param(init_nr_desc_per_channel, uint, 0644);
48MODULE_PARM_DESC(init_nr_desc_per_channel,
49 "initial descriptors per channel (default: 64)");
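/* Editorial note, not in the patch: with mode 0644 this can also be
 * tuned at runtime via
 * /sys/module/at_hdmac/parameters/init_nr_desc_per_channel, or at load
 * time, e.g. "modprobe at_hdmac init_nr_desc_per_channel=128". */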
50
51
52/* prototypes */
53static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
54
55
56/*----------------------------------------------------------------------*/
57
58static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
59{
60 return list_first_entry(&atchan->active_list,
61 struct at_desc, desc_node);
62}
63
64static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
65{
66 return list_first_entry(&atchan->queue,
67 struct at_desc, desc_node);
68}
69
70/**
71 * atc_alloc_descriptor - allocate and return an initialized descriptor
72 * @chan: the channel to allocate descriptors for
73 * @gfp_flags: GFP allocation flags
74 *
75 * Note: The ack-bit is positioned in the descriptor flag at creation time
76 * to make initial allocation more convenient. This bit will be cleared
77 * and control will be given to client at usage time (during
78 * preparation functions).
79 */
80static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
81 gfp_t gfp_flags)
82{
83 struct at_desc *desc = NULL;
84 struct at_dma *atdma = to_at_dma(chan->device);
85 dma_addr_t phys;
86
87 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
88 if (desc) {
89 memset(desc, 0, sizeof(struct at_desc));
90 dma_async_tx_descriptor_init(&desc->txd, chan);
91 /* txd.flags will be overwritten in prep functions */
92 desc->txd.flags = DMA_CTRL_ACK;
93 desc->txd.tx_submit = atc_tx_submit;
94 desc->txd.phys = phys;
95 }
96
97 return desc;
98}
99
100/**
101 * atc_desc_get - get an unused descriptor from free_list
102 * @atchan: channel we want a new descriptor for
103 */
104static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
105{
106 struct at_desc *desc, *_desc;
107 struct at_desc *ret = NULL;
108 unsigned int i = 0;
109 LIST_HEAD(tmp_list);
110
111 spin_lock_bh(&atchan->lock);
112 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
113 i++;
114 if (async_tx_test_ack(&desc->txd)) {
115 list_del(&desc->desc_node);
116 ret = desc;
117 break;
118 }
119 dev_dbg(chan2dev(&atchan->chan_common),
120 "desc %p not ACKed\n", desc);
121 }
122 spin_unlock_bh(&atchan->lock);
123 dev_vdbg(chan2dev(&atchan->chan_common),
124 "scanned %u descriptors on freelist\n", i);
125
126 /* no more descriptor available in initial pool: create one more */
127 if (!ret) {
128 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
129 if (ret) {
130 spin_lock_bh(&atchan->lock);
131 atchan->descs_allocated++;
132 spin_unlock_bh(&atchan->lock);
133 } else {
134 dev_err(chan2dev(&atchan->chan_common),
135 "not enough descriptors available\n");
136 }
137 }
138
139 return ret;
140}
141
142/**
143 * atc_desc_put - move a descriptor, including any children, to the free list
144 * @atchan: channel we work on
145 * @desc: descriptor, at the head of a chain, to move to free list
146 */
147static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
148{
149 if (desc) {
150 struct at_desc *child;
151
152 spin_lock_bh(&atchan->lock);
153 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
154 dev_vdbg(chan2dev(&atchan->chan_common),
155 "moving child desc %p to freelist\n",
156 child);
157 list_splice_init(&desc->txd.tx_list, &atchan->free_list);
158 dev_vdbg(chan2dev(&atchan->chan_common),
159 "moving desc %p to freelist\n", desc);
160 list_add(&desc->desc_node, &atchan->free_list);
161 spin_unlock_bh(&atchan->lock);
162 }
163}
164
165/**
166 * atc_assign_cookie - compute and assign new cookie
167 * @atchan: channel we work on
168 * @desc: descriptor to assign cookie for
169 *
170 * Called with atchan->lock held and bh disabled
171 */
172static dma_cookie_t
173atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
174{
175 dma_cookie_t cookie = atchan->chan_common.cookie;
176
177 if (++cookie < 0)
178 cookie = 1;
179
180 atchan->chan_common.cookie = cookie;
181 desc->txd.cookie = cookie;
182
183 return cookie;
184}
185
186/**
187 * atc_dostart - starts the DMA engine for real
188 * @atchan: the channel we want to start
189 * @first: first descriptor in the list we want to begin with
190 *
191 * Called with atchan->lock held and bh disabled
192 */
193static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
194{
195 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
196
197 /* ASSERT: channel is idle */
198 if (atc_chan_is_enabled(atchan)) {
199 dev_err(chan2dev(&atchan->chan_common),
200 "BUG: Attempted to start non-idle channel\n");
201 dev_err(chan2dev(&atchan->chan_common),
202 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
203 channel_readl(atchan, SADDR),
204 channel_readl(atchan, DADDR),
205 channel_readl(atchan, CTRLA),
206 channel_readl(atchan, CTRLB),
207 channel_readl(atchan, DSCR));
208
209 /* The tasklet will hopefully advance the queue... */
210 return;
211 }
212
213 vdbg_dump_regs(atchan);
214
215 /* clear any pending interrupt */
216 while (dma_readl(atdma, EBCISR))
217 cpu_relax();
218
219 channel_writel(atchan, SADDR, 0);
220 channel_writel(atchan, DADDR, 0);
221 channel_writel(atchan, CTRLA, 0);
222 channel_writel(atchan, CTRLB, 0);
223 channel_writel(atchan, DSCR, first->txd.phys);
224 dma_writel(atdma, CHER, atchan->mask);
225
226 vdbg_dump_regs(atchan);
227}
228
229/**
230 * atc_chain_complete - finish work for one transaction chain
231 * @atchan: channel we work on
232 * @desc: descriptor at the head of the chain we want do complete
233 *
234 * Called with atchan->lock held and bh disabled */
235static void
236atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
237{
238 dma_async_tx_callback callback;
239 void *param;
240 struct dma_async_tx_descriptor *txd = &desc->txd;
241
242 dev_vdbg(chan2dev(&atchan->chan_common),
243 "descriptor %u complete\n", txd->cookie);
244
245 atchan->completed_cookie = txd->cookie;
246 callback = txd->callback;
247 param = txd->callback_param;
248
249 /* move children to free_list */
250 list_splice_init(&txd->tx_list, &atchan->free_list);
251 /* move myself to free_list */
252 list_move(&desc->desc_node, &atchan->free_list);
253
254 /* unmap dma addresses */
255 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
256 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
257 dma_unmap_single(chan2parent(&atchan->chan_common),
258 desc->lli.daddr,
259 desc->len, DMA_FROM_DEVICE);
260 else
261 dma_unmap_page(chan2parent(&atchan->chan_common),
262 desc->lli.daddr,
263 desc->len, DMA_FROM_DEVICE);
264 }
265 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
266 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
267 dma_unmap_single(chan2parent(&atchan->chan_common),
268 desc->lli.saddr,
269 desc->len, DMA_TO_DEVICE);
270 else
271 dma_unmap_page(chan2parent(&atchan->chan_common),
272 desc->lli.saddr,
273 desc->len, DMA_TO_DEVICE);
274 }
275
276 /*
277 * The API requires that no submissions are done from a
278 * callback, so we don't need to drop the lock here
279 */
280 if (callback)
281 callback(param);
282
283 dma_run_dependencies(txd);
284}
285
286/**
287 * atc_complete_all - finish work for all transactions
288 * @atchan: channel to complete transactions for
289 *
290 * Eventually submit queued descriptors if any
291 *
292 * Assume channel is idle while calling this function
293 * Called with atchan->lock held and bh disabled
294 */
295static void atc_complete_all(struct at_dma_chan *atchan)
296{
297 struct at_desc *desc, *_desc;
298 LIST_HEAD(list);
299
300 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
301
302 BUG_ON(atc_chan_is_enabled(atchan));
303
304 /*
305 * Submit queued descriptors ASAP, i.e. before we go through
306 * the completed ones.
307 */
308 if (!list_empty(&atchan->queue))
309 atc_dostart(atchan, atc_first_queued(atchan));
310	/* empty active_list now that it is completed */
311 list_splice_init(&atchan->active_list, &list);
312 /* empty queue list by moving descriptors (if any) to active_list */
313 list_splice_init(&atchan->queue, &atchan->active_list);
314
315 list_for_each_entry_safe(desc, _desc, &list, desc_node)
316 atc_chain_complete(atchan, desc);
317}
318
319/**
320 * atc_cleanup_descriptors - clean up finished descriptors in active_list
321 * @atchan: channel to be cleaned up
322 *
323 * Called with atchan->lock held and bh disabled
324 */
325static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
326{
327 struct at_desc *desc, *_desc;
328 struct at_desc *child;
329
330 dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
331
332 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
333 if (!(desc->lli.ctrla & ATC_DONE))
334 /* This one is currently in progress */
335 return;
336
337 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
338 if (!(child->lli.ctrla & ATC_DONE))
339 /* Currently in progress */
340 return;
341
342 /*
343 * No descriptors so far seem to be in progress, i.e.
344 * this chain must be done.
345 */
346 atc_chain_complete(atchan, desc);
347 }
348}
349
350/**
351 * atc_advance_work - at the end of a transaction, move forward
352 * @atchan: channel where the transaction ended
353 *
354 * Called with atchan->lock held and bh disabled
355 */
356static void atc_advance_work(struct at_dma_chan *atchan)
357{
358 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
359
360 if (list_empty(&atchan->active_list) ||
361 list_is_singular(&atchan->active_list)) {
362 atc_complete_all(atchan);
363 } else {
364 atc_chain_complete(atchan, atc_first_active(atchan));
365 /* advance work */
366 atc_dostart(atchan, atc_first_active(atchan));
367 }
368}
369
370
371/**
372 * atc_handle_error - handle errors reported by DMA controller
373 * @atchan: channel where error occurs
374 *
375 * Called with atchan->lock held and bh disabled
376 */
377static void atc_handle_error(struct at_dma_chan *atchan)
378{
379 struct at_desc *bad_desc;
380 struct at_desc *child;
381
382 /*
383 * The descriptor currently at the head of the active list is
384 * broken. Since we don't have any way to report errors, we'll
385 * just have to scream loudly and try to carry on.
386 */
387 bad_desc = atc_first_active(atchan);
388 list_del_init(&bad_desc->desc_node);
389
390	/* As we are stopped, take the opportunity to push queued
391	 * descriptors onto the active_list */
392 list_splice_init(&atchan->queue, atchan->active_list.prev);
393
394 /* Try to restart the controller */
395 if (!list_empty(&atchan->active_list))
396 atc_dostart(atchan, atc_first_active(atchan));
397
398 /*
399 * KERN_CRIT may seem harsh, but since this only happens
400 * when someone submits a bad physical address in a
401 * descriptor, we should consider ourselves lucky that the
402 * controller flagged an error instead of scribbling over
403 * random memory locations.
404 */
405 dev_crit(chan2dev(&atchan->chan_common),
406 "Bad descriptor submitted for DMA!\n");
407 dev_crit(chan2dev(&atchan->chan_common),
408 " cookie: %d\n", bad_desc->txd.cookie);
409 atc_dump_lli(atchan, &bad_desc->lli);
410 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
411 atc_dump_lli(atchan, &child->lli);
412
413 /* Pretend the descriptor completed successfully */
414 atc_chain_complete(atchan, bad_desc);
415}
416
417
418/*-- IRQ & Tasklet ---------------------------------------------------*/
419
420static void atc_tasklet(unsigned long data)
421{
422 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
423
424 /* Channel cannot be enabled here */
425 if (atc_chan_is_enabled(atchan)) {
426 dev_err(chan2dev(&atchan->chan_common),
427 "BUG: channel enabled in tasklet\n");
428 return;
429 }
430
431 spin_lock(&atchan->lock);
432 if (test_and_clear_bit(0, &atchan->error_status))
433 atc_handle_error(atchan);
434 else
435 atc_advance_work(atchan);
436
437 spin_unlock(&atchan->lock);
438}
439
440static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
441{
442 struct at_dma *atdma = (struct at_dma *)dev_id;
443 struct at_dma_chan *atchan;
444 int i;
445 u32 status, pending, imr;
446 int ret = IRQ_NONE;
447
448 do {
449 imr = dma_readl(atdma, EBCIMR);
450 status = dma_readl(atdma, EBCISR);
451 pending = status & imr;
452
453 if (!pending)
454 break;
455
456 dev_vdbg(atdma->dma_common.dev,
457 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
458 status, imr, pending);
459
460 for (i = 0; i < atdma->dma_common.chancnt; i++) {
461 atchan = &atdma->chan[i];
462 if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
463 if (pending & AT_DMA_ERR(i)) {
464 /* Disable channel on AHB error */
465 dma_writel(atdma, CHDR, atchan->mask);
466 /* Give information to tasklet */
467 set_bit(0, &atchan->error_status);
468 }
469 tasklet_schedule(&atchan->tasklet);
470 ret = IRQ_HANDLED;
471 }
472 }
473
474 } while (pending);
475
476 return ret;
477}
478
479
480/*-- DMA Engine API --------------------------------------------------*/
481
482/**
483 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
484 * @desc: descriptor at the head of the transaction chain
485 *
486 * Queue chain if DMA engine is working already
487 *
488 * Cookie increment and adding to active_list or queue must be atomic
489 */
490static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
491{
492 struct at_desc *desc = txd_to_at_desc(tx);
493 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
494 dma_cookie_t cookie;
495
496 spin_lock_bh(&atchan->lock);
497 cookie = atc_assign_cookie(atchan, desc);
498
499 if (list_empty(&atchan->active_list)) {
500 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
501 desc->txd.cookie);
502 atc_dostart(atchan, desc);
503 list_add_tail(&desc->desc_node, &atchan->active_list);
504 } else {
505 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
506 desc->txd.cookie);
507 list_add_tail(&desc->desc_node, &atchan->queue);
508 }
509
510 spin_unlock_bh(&atchan->lock);
511
512 return cookie;
513}
514
515/**
516 * atc_prep_dma_memcpy - prepare a memcpy operation
517 * @chan: the channel to prepare operation on
518 * @dest: operation virtual destination address
519 * @src: operation virtual source address
520 * @len: operation length
521 * @flags: tx descriptor status flags
522 */
523static struct dma_async_tx_descriptor *
524atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
525 size_t len, unsigned long flags)
526{
527 struct at_dma_chan *atchan = to_at_dma_chan(chan);
528 struct at_desc *desc = NULL;
529 struct at_desc *first = NULL;
530 struct at_desc *prev = NULL;
531 size_t xfer_count;
532 size_t offset;
533 unsigned int src_width;
534 unsigned int dst_width;
535 u32 ctrla;
536 u32 ctrlb;
537
538 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
539 dest, src, len, flags);
540
541 if (unlikely(!len)) {
542 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
543 return NULL;
544 }
545
546 ctrla = ATC_DEFAULT_CTRLA;
547 ctrlb = ATC_DEFAULT_CTRLB
548 | ATC_SRC_ADDR_MODE_INCR
549 | ATC_DST_ADDR_MODE_INCR
550 | ATC_FC_MEM2MEM;
551
552 /*
553 * We can be a lot more clever here, but this should take care
554 * of the most common optimization.
555 */
556 if (!((src | dest | len) & 3)) {
557 ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
558 src_width = dst_width = 2;
559 } else if (!((src | dest | len) & 1)) {
560 ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
561 src_width = dst_width = 1;
562 } else {
563 ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
564 src_width = dst_width = 0;
565 }
566
567 for (offset = 0; offset < len; offset += xfer_count << src_width) {
568 xfer_count = min_t(size_t, (len - offset) >> src_width,
569 ATC_BTSIZE_MAX);
570
571 desc = atc_desc_get(atchan);
572 if (!desc)
573 goto err_desc_get;
574
575 desc->lli.saddr = src + offset;
576 desc->lli.daddr = dest + offset;
577 desc->lli.ctrla = ctrla | xfer_count;
578 desc->lli.ctrlb = ctrlb;
579
580 desc->txd.cookie = 0;
581 async_tx_ack(&desc->txd);
582
583 if (!first) {
584 first = desc;
585 } else {
586 /* inform the HW lli about chaining */
587 prev->lli.dscr = desc->txd.phys;
588 /* insert the link descriptor to the LD ring */
589 list_add_tail(&desc->desc_node,
590 &first->txd.tx_list);
591 }
592 prev = desc;
593 }
594
595	/* First descriptor of the chain embeds additional information */
596 first->txd.cookie = -EBUSY;
597 first->len = len;
598
599	/* set end-of-link to the last link descriptor of the list */
600 set_desc_eol(desc);
601
602 desc->txd.flags = flags; /* client is in control of this ack */
603
604 return &first->txd;
605
606err_desc_get:
607 atc_desc_put(atchan, first);
608 return NULL;
609}
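/*
 * Editorial worked example, not in the original patch: for a 1 MiB
 * word-aligned copy, (src | dest | len) & 3 == 0, so src_width = 2 and
 * each LLI moves at most ATC_BTSIZE_MAX (0xFFFF = 65535) words.
 * 1 MiB = 262144 words, so the loop above emits four LLIs of 65535 words
 * plus a fifth of 4 words, chained through lli.dscr and terminated by
 * set_desc_eol().
 */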
610
611/**
612 * atc_is_tx_complete - poll for transaction completion
613 * @chan: DMA channel
614 * @cookie: transaction identifier to check status of
615 * @done: if not %NULL, updated with last completed transaction
616 * @used: if not %NULL, updated with last used transaction
617 *
618 * If @done and @used are passed in, upon return they reflect the driver
619 * internal state and can be used with dma_async_is_complete() to check
620 * the status of multiple cookies without re-checking hardware state.
621 */
622static enum dma_status
623atc_is_tx_complete(struct dma_chan *chan,
624 dma_cookie_t cookie,
625 dma_cookie_t *done, dma_cookie_t *used)
626{
627 struct at_dma_chan *atchan = to_at_dma_chan(chan);
628 dma_cookie_t last_used;
629 dma_cookie_t last_complete;
630 enum dma_status ret;
631
632 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
633 cookie, done ? *done : 0, used ? *used : 0);
634
635	spin_lock_bh(&atchan->lock);
636
637 last_complete = atchan->completed_cookie;
638 last_used = chan->cookie;
639
640 ret = dma_async_is_complete(cookie, last_complete, last_used);
641 if (ret != DMA_SUCCESS) {
642 atc_cleanup_descriptors(atchan);
643
644 last_complete = atchan->completed_cookie;
645 last_used = chan->cookie;
646
647 ret = dma_async_is_complete(cookie, last_complete, last_used);
648 }
649
650	spin_unlock_bh(&atchan->lock);
651
652 if (done)
653 *done = last_complete;
654 if (used)
655 *used = last_used;
656
657 return ret;
658}
659
660/**
661 * atc_issue_pending - try to finish work
662 * @chan: target DMA channel
663 */
664static void atc_issue_pending(struct dma_chan *chan)
665{
666 struct at_dma_chan *atchan = to_at_dma_chan(chan);
667
668 dev_vdbg(chan2dev(chan), "issue_pending\n");
669
670 if (!atc_chan_is_enabled(atchan)) {
671 spin_lock_bh(&atchan->lock);
672 atc_advance_work(atchan);
673 spin_unlock_bh(&atchan->lock);
674 }
675}
676
677/**
678 * atc_alloc_chan_resources - allocate resources for DMA channel
679 * @chan: allocate descriptor resources for this channel
681 *
682 * return - the number of allocated descriptors
683 */
684static int atc_alloc_chan_resources(struct dma_chan *chan)
685{
686 struct at_dma_chan *atchan = to_at_dma_chan(chan);
687 struct at_dma *atdma = to_at_dma(chan->device);
688 struct at_desc *desc;
689 int i;
690 LIST_HEAD(tmp_list);
691
692 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
693
694 /* ASSERT: channel is idle */
695 if (atc_chan_is_enabled(atchan)) {
696 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
697 return -EIO;
698 }
699
700 /* have we already been set up? */
701 if (!list_empty(&atchan->free_list))
702 return atchan->descs_allocated;
703
704 /* Allocate initial pool of descriptors */
705 for (i = 0; i < init_nr_desc_per_channel; i++) {
706 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
707 if (!desc) {
708 dev_err(atdma->dma_common.dev,
709 "Only %d initial descriptors\n", i);
710 break;
711 }
712 list_add_tail(&desc->desc_node, &tmp_list);
713 }
714
715 spin_lock_bh(&atchan->lock);
716 atchan->descs_allocated = i;
717 list_splice(&tmp_list, &atchan->free_list);
718 atchan->completed_cookie = chan->cookie = 1;
719 spin_unlock_bh(&atchan->lock);
720
721 /* channel parameters */
722 channel_writel(atchan, CFG, ATC_DEFAULT_CFG);
723
724 dev_dbg(chan2dev(chan),
725 "alloc_chan_resources: allocated %d descriptors\n",
726 atchan->descs_allocated);
727
728 return atchan->descs_allocated;
729}
730
731/**
732 * atc_free_chan_resources - free all channel resources
733 * @chan: DMA channel
734 */
735static void atc_free_chan_resources(struct dma_chan *chan)
736{
737 struct at_dma_chan *atchan = to_at_dma_chan(chan);
738 struct at_dma *atdma = to_at_dma(chan->device);
739 struct at_desc *desc, *_desc;
740 LIST_HEAD(list);
741
742 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
743 atchan->descs_allocated);
744
745 /* ASSERT: channel is idle */
746 BUG_ON(!list_empty(&atchan->active_list));
747 BUG_ON(!list_empty(&atchan->queue));
748 BUG_ON(atc_chan_is_enabled(atchan));
749
750 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
751 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
752 list_del(&desc->desc_node);
753 /* free link descriptor */
754 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
755 }
756 list_splice_init(&atchan->free_list, &list);
757 atchan->descs_allocated = 0;
758
759 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
760}
761
762
763/*-- Module Management -----------------------------------------------*/
764
765/**
766 * at_dma_off - disable DMA controller
767 * @atdma: the Atmel HDMAC device
768 */
769static void at_dma_off(struct at_dma *atdma)
770{
771 dma_writel(atdma, EN, 0);
772
773 /* disable all interrupts */
774 dma_writel(atdma, EBCIDR, -1L);
775
776 /* confirm that all channels are disabled */
777 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
778 cpu_relax();
779}
780
781static int __init at_dma_probe(struct platform_device *pdev)
782{
783 struct at_dma_platform_data *pdata;
784 struct resource *io;
785 struct at_dma *atdma;
786 size_t size;
787 int irq;
788 int err;
789 int i;
790
791 /* get DMA Controller parameters from platform */
792 pdata = pdev->dev.platform_data;
793 if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
794 return -EINVAL;
795
796 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
797 if (!io)
798 return -EINVAL;
799
800 irq = platform_get_irq(pdev, 0);
801 if (irq < 0)
802 return irq;
803
804 size = sizeof(struct at_dma);
805 size += pdata->nr_channels * sizeof(struct at_dma_chan);
806 atdma = kzalloc(size, GFP_KERNEL);
807 if (!atdma)
808 return -ENOMEM;
809
810	/* discover transaction capabilities from the platform data */
811 atdma->dma_common.cap_mask = pdata->cap_mask;
812 atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
813
814 size = io->end - io->start + 1;
815 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
816 err = -EBUSY;
817 goto err_kfree;
818 }
819
820 atdma->regs = ioremap(io->start, size);
821 if (!atdma->regs) {
822 err = -ENOMEM;
823 goto err_release_r;
824 }
825
826 atdma->clk = clk_get(&pdev->dev, "dma_clk");
827 if (IS_ERR(atdma->clk)) {
828 err = PTR_ERR(atdma->clk);
829 goto err_clk;
830 }
831 clk_enable(atdma->clk);
832
833 /* force dma off, just in case */
834 at_dma_off(atdma);
835
836 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
837 if (err)
838 goto err_irq;
839
840 platform_set_drvdata(pdev, atdma);
841
842 /* create a pool of consistent memory blocks for hardware descriptors */
843 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
844 &pdev->dev, sizeof(struct at_desc),
845 4 /* word alignment */, 0);
846 if (!atdma->dma_desc_pool) {
847 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
848 err = -ENOMEM;
849 goto err_pool_create;
850 }
851
852 /* clear any pending interrupt */
853 while (dma_readl(atdma, EBCISR))
854 cpu_relax();
855
856 /* initialize channels related values */
857 INIT_LIST_HEAD(&atdma->dma_common.channels);
858 for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
859 struct at_dma_chan *atchan = &atdma->chan[i];
860
861 atchan->chan_common.device = &atdma->dma_common;
862 atchan->chan_common.cookie = atchan->completed_cookie = 1;
863 atchan->chan_common.chan_id = i;
864 list_add_tail(&atchan->chan_common.device_node,
865 &atdma->dma_common.channels);
866
867 atchan->ch_regs = atdma->regs + ch_regs(i);
868 spin_lock_init(&atchan->lock);
869 atchan->mask = 1 << i;
870
871 INIT_LIST_HEAD(&atchan->active_list);
872 INIT_LIST_HEAD(&atchan->queue);
873 INIT_LIST_HEAD(&atchan->free_list);
874
875 tasklet_init(&atchan->tasklet, atc_tasklet,
876 (unsigned long)atchan);
877 atc_enable_irq(atchan);
878 }
879
880 /* set base routines */
881 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
882 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
883 atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
884 atdma->dma_common.device_issue_pending = atc_issue_pending;
885 atdma->dma_common.dev = &pdev->dev;
886
887 /* set prep routines based on capability */
888 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
889 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
890
891 dma_writel(atdma, EN, AT_DMA_ENABLE);
892
893 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
894 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
895 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
896 atdma->dma_common.chancnt);
897
898 dma_async_device_register(&atdma->dma_common);
899
900 return 0;
901
902err_pool_create:
903 platform_set_drvdata(pdev, NULL);
904 free_irq(platform_get_irq(pdev, 0), atdma);
905err_irq:
906 clk_disable(atdma->clk);
907 clk_put(atdma->clk);
908err_clk:
909 iounmap(atdma->regs);
910 atdma->regs = NULL;
911err_release_r:
912 release_mem_region(io->start, size);
913err_kfree:
914 kfree(atdma);
915 return err;
916}
917
918static int __exit at_dma_remove(struct platform_device *pdev)
919{
920 struct at_dma *atdma = platform_get_drvdata(pdev);
921 struct dma_chan *chan, *_chan;
922 struct resource *io;
923
924 at_dma_off(atdma);
925 dma_async_device_unregister(&atdma->dma_common);
926
927 dma_pool_destroy(atdma->dma_desc_pool);
928 platform_set_drvdata(pdev, NULL);
929 free_irq(platform_get_irq(pdev, 0), atdma);
930
931 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
932 device_node) {
933 struct at_dma_chan *atchan = to_at_dma_chan(chan);
934
935 /* Disable interrupts */
936 atc_disable_irq(atchan);
937 tasklet_disable(&atchan->tasklet);
938
939 tasklet_kill(&atchan->tasklet);
940 list_del(&chan->device_node);
941 }
942
943 clk_disable(atdma->clk);
944 clk_put(atdma->clk);
945
946 iounmap(atdma->regs);
947 atdma->regs = NULL;
948
949 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
950 release_mem_region(io->start, io->end - io->start + 1);
951
952 kfree(atdma);
953
954 return 0;
955}
956
957static void at_dma_shutdown(struct platform_device *pdev)
958{
959 struct at_dma *atdma = platform_get_drvdata(pdev);
960
961	at_dma_off(atdma);
962 clk_disable(atdma->clk);
963}
964
965static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
966{
967 struct at_dma *atdma = platform_get_drvdata(pdev);
968
969	at_dma_off(atdma);
970 clk_disable(atdma->clk);
971 return 0;
972}
973
974static int at_dma_resume_early(struct platform_device *pdev)
975{
976 struct at_dma *atdma = platform_get_drvdata(pdev);
977
978 clk_enable(atdma->clk);
979 dma_writel(atdma, EN, AT_DMA_ENABLE);
980 return 0;
981
982}
983
984static struct platform_driver at_dma_driver = {
985 .remove = __exit_p(at_dma_remove),
986 .shutdown = at_dma_shutdown,
987 .suspend_late = at_dma_suspend_late,
988 .resume_early = at_dma_resume_early,
989 .driver = {
990 .name = "at_hdmac",
991 },
992};
993
994static int __init at_dma_init(void)
995{
996 return platform_driver_probe(&at_dma_driver, at_dma_probe);
997}
998module_init(at_dma_init);
999
1000static void __exit at_dma_exit(void)
1001{
1002 platform_driver_unregister(&at_dma_driver);
1003}
1004module_exit(at_dma_exit);
1005
1006MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1007MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1008MODULE_LICENSE("GPL");
1009MODULE_ALIAS("platform:at_hdmac");
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
new file mode 100644
index 000000000000..ad2d4f402bf7
--- /dev/null
+++ b/drivers/dma/at_hdmac_regs.h
@@ -0,0 +1,386 @@
1/*
2 * Header file for the Atmel AHB DMA Controller driver
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef AT_HDMAC_REGS_H
12#define AT_HDMAC_REGS_H
13
14#include <mach/at_hdmac.h>
15
16#define AT_DMA_MAX_NR_CHANNELS 8
17
18
19#define AT_DMA_GCFG 0x00 /* Global Configuration Register */
20#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */
21#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. */
22#define AT_DMA_ARB_CFG_FIXED (0x0 << 4)
23#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4)
24
25#define AT_DMA_EN 0x04 /* Controller Enable Register */
26#define AT_DMA_ENABLE (0x1 << 0)
27
28#define AT_DMA_SREQ 0x08 /* Software Single Request Register */
29#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */
30#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */
31
32#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */
33#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */
34#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */
35
36#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */
37#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */
38#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */
39
40#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */
41#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */
42
43/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
44#define AT_DMA_EBCIER 0x18 /* Enable register */
45#define AT_DMA_EBCIDR 0x1C /* Disable register */
46#define AT_DMA_EBCIMR 0x20 /* Mask Register */
47#define AT_DMA_EBCISR 0x24 /* Status Register */
48#define AT_DMA_CBTC_OFFSET 8
49#define AT_DMA_ERR_OFFSET 16
50#define AT_DMA_BTC(x) (0x1 << (x))
51#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x)))
52#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x)))
53
54#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */
55#define AT_DMA_ENA(x) (0x1 << (x))
56#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x)))
57#define AT_DMA_KEEP(x) (0x1 << (24 + (x)))
58
59#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */
60#define AT_DMA_DIS(x) (0x1 << (x))
61#define AT_DMA_RES(x) (0x1 << ( 8 + (x)))
62
63#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */
64#define AT_DMA_EMPT(x) (0x1 << (16 + (x)))
65#define AT_DMA_STAL(x) (0x1 << (24 + (x)))
66
67
68#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */
69#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */
70
71/* Hardware register offset for each channel */
72#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */
73#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */
74#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */
75#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */
76#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */
77#define ATC_CFG_OFFSET 0x14 /* Configuration Register */
78#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */
79#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */
80
81
82/* Bitfield definitions */
83
84/* Bitfields in DSCR */
85#define ATC_DSCR_IF(i) (0x3 & (i)) /* Descriptor fetched via AHB-Lite Interface i */
86
87/* Bitfields in CTRLA */
88#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
89#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
90#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
91#define ATC_SCSIZE_1 (0x0 << 16)
92#define ATC_SCSIZE_4 (0x1 << 16)
93#define ATC_SCSIZE_8 (0x2 << 16)
94#define ATC_SCSIZE_16 (0x3 << 16)
95#define ATC_SCSIZE_32 (0x4 << 16)
96#define ATC_SCSIZE_64 (0x5 << 16)
97#define ATC_SCSIZE_128 (0x6 << 16)
98#define ATC_SCSIZE_256 (0x7 << 16)
99#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
100#define ATC_DCSIZE_1 (0x0 << 20)
101#define ATC_DCSIZE_4 (0x1 << 20)
102#define ATC_DCSIZE_8 (0x2 << 20)
103#define ATC_DCSIZE_16 (0x3 << 20)
104#define ATC_DCSIZE_32 (0x4 << 20)
105#define ATC_DCSIZE_64 (0x5 << 20)
106#define ATC_DCSIZE_128 (0x6 << 20)
107#define ATC_DCSIZE_256 (0x7 << 20)
108#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
109#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
110#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
111#define ATC_SRC_WIDTH_WORD (0x2 << 24)
112#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
113#define ATC_DST_WIDTH_BYTE (0x0 << 28)
114#define ATC_DST_WIDTH_HALFWORD (0x1 << 28)
115#define ATC_DST_WIDTH_WORD (0x2 << 28)
116#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */
117
118/* Bitfields in CTRLB */
119#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
120#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
121#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
122#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
123#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
124#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */
125#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */
126#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */
127#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */
128#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */
129#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */
130#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */
131#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */
132#define ATC_FC_PER2PER_PER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */
133#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24)
134#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */
135#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */
136#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */
137#define ATC_DST_ADDR_MODE_MASK (0x3 << 28)
138#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */
139#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */
140#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */
141#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */
142#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */
143
144/* Bitfields in CFG */
145#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
146#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
147#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mode */
148#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mode */
149#define ATC_SRC_H2SEL_SW (0x0 << 9)
150#define ATC_SRC_H2SEL_HW (0x1 << 9)
151#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mode */
152#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mode */
153#define ATC_DST_H2SEL_SW (0x0 << 13)
154#define ATC_DST_H2SEL_HW (0x1 << 13)
155#define ATC_SOD (0x1 << 16) /* Stop On Done */
156#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
157#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
158#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
159#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
160#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
161#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
162#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
163#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
164#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
165#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
166
167/* Bitfields in SPIP */
168#define ATC_SPIP_HOLE(x) (0xFFFFU & (x))
169#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)
170
171/* Bitfields in DPIP */
172#define ATC_DPIP_HOLE(x) (0xFFFFU & (x))
173#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)
174
175
176/*-- descriptors -----------------------------------------------------*/
177
178/* LLI == Linked List Item; aka DMA buffer descriptor */
179struct at_lli {
180 /* values that are not changed by hardware */
181 dma_addr_t saddr;
182 dma_addr_t daddr;
183 /* value that may get written back: */
184 u32 ctrla;
185 /* more values that are not changed by hardware */
186 u32 ctrlb;
187 dma_addr_t dscr; /* chain to next lli */
188};
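/*
 * Editorial sketch, not in the original patch: the controller walks the
 * chain by fetching each at_lli from the address in the previous one's
 * dscr field. A two-buffer transfer thus looks like:
 *
 *   DSCR register --> lli0 { saddr, daddr, ctrla, ctrlb, dscr = &lli1 }
 *                     lli1 { saddr, daddr, ctrla, ctrlb, dscr = 0 }
 *
 * where lli1 also carries the descriptor-fetch-disable bits set by
 * set_desc_eol(). atc_dostart() only programs the DSCR register; the
 * per-buffer registers are loaded from the first lli itself.
 */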
189
190/**
191 * struct at_desc - software descriptor
192 * @lli: hardware lli structure
193 * @txd: support for the async_tx api
194 * @desc_node: node on the channel descriptors list
195 * @len: total transaction bytecount
196 */
197struct at_desc {
198 /* FIRST values the hardware uses */
199 struct at_lli lli;
200
201 /* THEN values for driver housekeeping */
202 struct dma_async_tx_descriptor txd;
203 struct list_head desc_node;
204 size_t len;
205};
206
207static inline struct at_desc *
208txd_to_at_desc(struct dma_async_tx_descriptor *txd)
209{
210 return container_of(txd, struct at_desc, txd);
211}
212
213
214/*-- Channels --------------------------------------------------------*/
215
216/**
217 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
218 * @chan_common: common dmaengine channel object members
219 * @device: parent device
220 * @ch_regs: memory mapped register base
221 * @mask: channel index in a mask
222 * @error_status: transmit error status information from irq handler
223 * to tasklet (use atomic operations)
224 * @tasklet: bottom half to finish transaction work
225 * @lock: serializes enqueue/dequeue operations to descriptors lists
226 * @completed_cookie: identifier for the most recently completed operation
227 * @active_list: list of descriptors the dmaengine is running on
228 * @queue: list of descriptors ready to be submitted to engine
229 * @free_list: list of descriptors usable by the channel
230 * @descs_allocated: records the actual size of the descriptor pool
231 */
232struct at_dma_chan {
233 struct dma_chan chan_common;
234 struct at_dma *device;
235 void __iomem *ch_regs;
236 u8 mask;
237 unsigned long error_status;
238 struct tasklet_struct tasklet;
239
240 spinlock_t lock;
241
242 /* these other elements are all protected by lock */
243 dma_cookie_t completed_cookie;
244 struct list_head active_list;
245 struct list_head queue;
246 struct list_head free_list;
247 unsigned int descs_allocated;
248};
249
250#define channel_readl(atchan, name) \
251 __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)
252
253#define channel_writel(atchan, name, val) \
254 __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
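/* Editorial example, not in the patch: channel_readl(atchan, SADDR)
 * expands to __raw_readl((atchan)->ch_regs + ATC_SADDR_OFFSET). */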
255
256static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
257{
258 return container_of(dchan, struct at_dma_chan, chan_common);
259}
260
261
262/*-- Controller ------------------------------------------------------*/
263
264/**
265 * struct at_dma - internal representation of an Atmel HDMA Controller
266 * @dma_common: common dmaengine dma_device object members
267 * @regs: memory mapped register base
268 * @clk: dma controller clock
269 * @all_chan_mask: all channels available in a mask
270 * @dma_desc_pool: base of DMA descriptor region (DMA address)
271 * @chan: channels table to store at_dma_chan structures
272 */
273struct at_dma {
274 struct dma_device dma_common;
275 void __iomem *regs;
276 struct clk *clk;
277
278 u8 all_chan_mask;
279
280 struct dma_pool *dma_desc_pool;
281 /* AT THE END channels table */
282 struct at_dma_chan chan[0];
283};
284
285#define dma_readl(atdma, name) \
286 __raw_readl((atdma)->regs + AT_DMA_##name)
287#define dma_writel(atdma, name, val) \
288 __raw_writel((val), (atdma)->regs + AT_DMA_##name)
289
290static inline struct at_dma *to_at_dma(struct dma_device *ddev)
291{
292 return container_of(ddev, struct at_dma, dma_common);
293}
294
295
296/*-- Helper functions ------------------------------------------------*/
297
298static struct device *chan2dev(struct dma_chan *chan)
299{
300 return &chan->dev->device;
301}
302static struct device *chan2parent(struct dma_chan *chan)
303{
304 return chan->dev->device.parent;
305}
306
307#if defined(VERBOSE_DEBUG)
308static void vdbg_dump_regs(struct at_dma_chan *atchan)
309{
310 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
311
312 dev_err(chan2dev(&atchan->chan_common),
313 " channel %d : imr = 0x%x, chsr = 0x%x\n",
314 atchan->chan_common.chan_id,
315 dma_readl(atdma, EBCIMR),
316 dma_readl(atdma, CHSR));
317
318 dev_err(chan2dev(&atchan->chan_common),
319 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
320 channel_readl(atchan, SADDR),
321 channel_readl(atchan, DADDR),
322 channel_readl(atchan, CTRLA),
323 channel_readl(atchan, CTRLB),
324 channel_readl(atchan, DSCR));
325}
326#else
327static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
328#endif
329
330static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
331{
332 dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
333 " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
334 lli->saddr, lli->daddr,
335 lli->ctrla, lli->ctrlb, lli->dscr);
336}
337
338
339static void atc_setup_irq(struct at_dma_chan *atchan, int on)
340{
341 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
342 u32 ebci;
343
344 /* enable interrupts on buffer chain completion & error */
345 ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
346 | AT_DMA_ERR(atchan->chan_common.chan_id);
347 if (on)
348 dma_writel(atdma, EBCIER, ebci);
349 else
350 dma_writel(atdma, EBCIDR, ebci);
351}
352
353static inline void atc_enable_irq(struct at_dma_chan *atchan)
354{
355 atc_setup_irq(atchan, 1);
356}
357
358static inline void atc_disable_irq(struct at_dma_chan *atchan)
359{
360 atc_setup_irq(atchan, 0);
361}
362
363
364/**
365 * atc_chan_is_enabled - test if given channel is enabled
366 * @atchan: channel we want to test status
367 */
368static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
369{
370 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
371
372 return !!(dma_readl(atdma, CHSR) & atchan->mask);
373}
374
375
376/**
377 * set_desc_eol - set end-of-link to descriptor so it will end transfer
378 * @desc: descriptor, single or at the end of a chain, to end chain on
379 */
380static void set_desc_eol(struct at_desc *desc)
381{
382 desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
383 desc->lli.dscr = 0;
384}
385
386#endif /* AT_HDMAC_REGS_H */