path: root/drivers/dma
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          |   20
-rw-r--r--  drivers/dma/Makefile         |    2
-rw-r--r--  drivers/dma/at_hdmac.c       | 1213
-rw-r--r--  drivers/dma/at_hdmac_regs.h  |  353
-rw-r--r--  drivers/dma/dmatest.c        |   21
-rw-r--r--  drivers/dma/fsldma.c         |   17
-rw-r--r--  drivers/dma/fsldma.h         |    1
-rw-r--r--  drivers/dma/mv_xor.c         |    2
-rw-r--r--  drivers/dma/txx9dmac.c       | 1358
-rw-r--r--  drivers/dma/txx9dmac.h       |  307
10 files changed, 3282 insertions, 12 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3b3c01b6f1ee..81e1020fb514 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig DMADEVICES
 	bool "DMA Engine support"
-	depends on !HIGHMEM64G && HAS_DMA
+	depends on HAS_DMA
 	help
 	  DMA engines can do asynchronous data transfers without
 	  involving the host CPU. Currently, this framework can be
@@ -46,6 +46,14 @@ config DW_DMAC
 	  Support the Synopsys DesignWare AHB DMA controller. This
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
+config AT_HDMAC
+	tristate "Atmel AHB DMA support"
+	depends on ARCH_AT91SAM9RL
+	select DMA_ENGINE
+	help
+	  Support the Atmel AHB DMA controller. This can be integrated in
+	  chips such as the Atmel AT91SAM9RL.
+
 config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
@@ -81,6 +89,14 @@ config MX3_IPU_IRQS
 	  To avoid bloating the irq_desc[] array we allocate a sufficient
 	  number of IRQ slots and map them dynamically to specific sources.
 
+config TXX9_DMAC
+	tristate "Toshiba TXx9 SoC DMA support"
+	depends on MACH_TX49XX || MACH_TX39XX
+	select DMA_ENGINE
+	help
+	  Support the TXx9 SoC internal DMA controller. This can be
+	  integrated in chips such as the Toshiba TX4927/38/39.
+
 config DMA_ENGINE
 	bool
 
@@ -100,7 +116,7 @@ config NET_DMA
 
 config ASYNC_TX_DMA
 	bool "Async_tx: Offload support for the async_tx api"
-	depends on DMA_ENGINE
+	depends on DMA_ENGINE && !HIGHMEM64G
 	help
 	  This allows the async_tx api to take advantage of offload engines for
 	  memcpy, memset, xor, and raid6 p+q operations. If your platform has
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a1cb2857bba6..8f115e93b4a1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
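
For reference, the memcpy capability these drivers register is consumed through the generic dmaengine client API. The sketch below is illustrative only and not part of this patch: it assumes src and dst are already valid bus addresses, polls instead of using a completion callback, and trims error handling. The comments name the at_hdmac callbacks that back each step on this controller.

#include <linux/dmaengine.h>

/* Hedged sketch of a polling dmaengine memcpy client (not part of this patch). */
static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* backed by atc_prep_dma_memcpy() on this controller */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = tx->tx_submit(tx);			/* atc_tx_submit() */
	chan->device->device_issue_pending(chan);	/* atc_issue_pending() */

	/* atc_is_tx_complete() answers this poll */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
			DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}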
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
new file mode 100644
index 000000000000..9a1e5fb412ed
--- /dev/null
+++ b/drivers/dma/at_hdmac.c
@@ -0,0 +1,1213 @@
1/*
2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 *
12 * This supports the Atmel AHB DMA Controller.
13 *
14 * The driver has currently been tested with the Atmel AT91SAM9RL
15 * and AT91SAM9G45 series.
16 */
17
18#include <linux/clk.h>
19#include <linux/dmaengine.h>
20#include <linux/dma-mapping.h>
21#include <linux/dmapool.h>
22#include <linux/interrupt.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include "at_hdmac_regs.h"
27
28/*
29 * Glossary
30 * --------
31 *
32 * at_hdmac : Name of the Atmel AHB DMA Controller
33 * at_dma_ / atdma : Atmel DMA controller entity related
34 * atc_ / atchan : Atmel DMA Channel entity related
35 */
36
37#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
38#define ATC_DEFAULT_CTRLA (0)
39#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
40 |ATC_DIF(1))
41
42/*
43 * Initial number of descriptors to allocate for each channel. This could
44 * be increased during dma usage.
45 */
46static unsigned int init_nr_desc_per_channel = 64;
47module_param(init_nr_desc_per_channel, uint, 0644);
48MODULE_PARM_DESC(init_nr_desc_per_channel,
49 "initial descriptors per channel (default: 64)");
50
51
52/* prototypes */
53static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
54
55
56/*----------------------------------------------------------------------*/
57
58static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
59{
60 return list_first_entry(&atchan->active_list,
61 struct at_desc, desc_node);
62}
63
64static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
65{
66 return list_first_entry(&atchan->queue,
67 struct at_desc, desc_node);
68}
69
70/**
71 * atc_alloc_descriptor - allocate and return an initialized descriptor
72 * @chan: the channel to allocate descriptors for
73 * @gfp_flags: GFP allocation flags
74 *
75 * Note: The ack-bit is positioned in the descriptor flag at creation time
76 * to make initial allocation more convenient. This bit will be cleared
77 * and control will be given to client at usage time (during
78 * preparation functions).
79 */
80static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
81 gfp_t gfp_flags)
82{
83 struct at_desc *desc = NULL;
84 struct at_dma *atdma = to_at_dma(chan->device);
85 dma_addr_t phys;
86
87 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
88 if (desc) {
89 memset(desc, 0, sizeof(struct at_desc));
90 dma_async_tx_descriptor_init(&desc->txd, chan);
91 /* txd.flags will be overwritten in prep functions */
92 desc->txd.flags = DMA_CTRL_ACK;
93 desc->txd.tx_submit = atc_tx_submit;
94 desc->txd.phys = phys;
95 }
96
97 return desc;
98}
99
100/**
101 * atc_desc_get - get an unused descriptor from free_list
102 * @atchan: channel we want a new descriptor for
103 */
104static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
105{
106 struct at_desc *desc, *_desc;
107 struct at_desc *ret = NULL;
108 unsigned int i = 0;
109 LIST_HEAD(tmp_list);
110
111 spin_lock_bh(&atchan->lock);
112 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
113 i++;
114 if (async_tx_test_ack(&desc->txd)) {
115 list_del(&desc->desc_node);
116 ret = desc;
117 break;
118 }
119 dev_dbg(chan2dev(&atchan->chan_common),
120 "desc %p not ACKed\n", desc);
121 }
122 spin_unlock_bh(&atchan->lock);
123 dev_vdbg(chan2dev(&atchan->chan_common),
124 "scanned %u descriptors on freelist\n", i);
125
126 /* no more descriptors available in the initial pool: create one more */
127 if (!ret) {
128 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
129 if (ret) {
130 spin_lock_bh(&atchan->lock);
131 atchan->descs_allocated++;
132 spin_unlock_bh(&atchan->lock);
133 } else {
134 dev_err(chan2dev(&atchan->chan_common),
135 "not enough descriptors available\n");
136 }
137 }
138
139 return ret;
140}
141
142/**
143 * atc_desc_put - move a descriptor, including any children, to the free list
144 * @atchan: channel we work on
145 * @desc: descriptor, at the head of a chain, to move to free list
146 */
147static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
148{
149 if (desc) {
150 struct at_desc *child;
151
152 spin_lock_bh(&atchan->lock);
153 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
154 dev_vdbg(chan2dev(&atchan->chan_common),
155 "moving child desc %p to freelist\n",
156 child);
157 list_splice_init(&desc->txd.tx_list, &atchan->free_list);
158 dev_vdbg(chan2dev(&atchan->chan_common),
159 "moving desc %p to freelist\n", desc);
160 list_add(&desc->desc_node, &atchan->free_list);
161 spin_unlock_bh(&atchan->lock);
162 }
163}
164
165/**
166 * atc_assign_cookie - compute and assign new cookie
167 * @atchan: channel we work on
168 * @desc: descriptor to assign cookie for
169 *
170 * Called with atchan->lock held and bh disabled
171 */
172static dma_cookie_t
173atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
174{
175 dma_cookie_t cookie = atchan->chan_common.cookie;
176
177 if (++cookie < 0)
178 cookie = 1;
179
180 atchan->chan_common.cookie = cookie;
181 desc->txd.cookie = cookie;
182
183 return cookie;
184}
185
186/**
187 * atc_dostart - starts the DMA engine for real
188 * @atchan: the channel we want to start
189 * @first: first descriptor in the list we want to begin with
190 *
191 * Called with atchan->lock held and bh disabled
192 */
193static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
194{
195 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
196
197 /* ASSERT: channel is idle */
198 if (atc_chan_is_enabled(atchan)) {
199 dev_err(chan2dev(&atchan->chan_common),
200 "BUG: Attempted to start non-idle channel\n");
201 dev_err(chan2dev(&atchan->chan_common),
202 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
203 channel_readl(atchan, SADDR),
204 channel_readl(atchan, DADDR),
205 channel_readl(atchan, CTRLA),
206 channel_readl(atchan, CTRLB),
207 channel_readl(atchan, DSCR));
208
209 /* The tasklet will hopefully advance the queue... */
210 return;
211 }
212
213 vdbg_dump_regs(atchan);
214
215 /* clear any pending interrupt */
216 while (dma_readl(atdma, EBCISR))
217 cpu_relax();
218
219 channel_writel(atchan, SADDR, 0);
220 channel_writel(atchan, DADDR, 0);
221 channel_writel(atchan, CTRLA, 0);
222 channel_writel(atchan, CTRLB, 0);
223 channel_writel(atchan, DSCR, first->txd.phys);
224 dma_writel(atdma, CHER, atchan->mask);
225
226 vdbg_dump_regs(atchan);
227}
228
229/**
230 * atc_chain_complete - finish work for one transaction chain
231 * @atchan: channel we work on
232 * @desc: descriptor at the head of the chain we want to complete
233 *
234 * Called with atchan->lock held and bh disabled */
235static void
236atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
237{
238 dma_async_tx_callback callback;
239 void *param;
240 struct dma_async_tx_descriptor *txd = &desc->txd;
241
242 dev_vdbg(chan2dev(&atchan->chan_common),
243 "descriptor %u complete\n", txd->cookie);
244
245 atchan->completed_cookie = txd->cookie;
246 callback = txd->callback;
247 param = txd->callback_param;
248
249 /* move children to free_list */
250 list_splice_init(&txd->tx_list, &atchan->free_list);
251 /* move myself to free_list */
252 list_move(&desc->desc_node, &atchan->free_list);
253
254 /* unmap dma addresses */
255 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
256 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
257 dma_unmap_single(chan2parent(&atchan->chan_common),
258 desc->lli.daddr,
259 desc->len, DMA_FROM_DEVICE);
260 else
261 dma_unmap_page(chan2parent(&atchan->chan_common),
262 desc->lli.daddr,
263 desc->len, DMA_FROM_DEVICE);
264 }
265 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
266 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
267 dma_unmap_single(chan2parent(&atchan->chan_common),
268 desc->lli.saddr,
269 desc->len, DMA_TO_DEVICE);
270 else
271 dma_unmap_page(chan2parent(&atchan->chan_common),
272 desc->lli.saddr,
273 desc->len, DMA_TO_DEVICE);
274 }
275
276 /*
277 * The API requires that no submissions are done from a
278 * callback, so we don't need to drop the lock here
279 */
280 if (callback)
281 callback(param);
282
283 dma_run_dependencies(txd);
284}
285
286/**
287 * atc_complete_all - finish work for all transactions
288 * @atchan: channel to complete transactions for
289 *
290 * Also submits any queued descriptors
291 *
292 * Assume channel is idle while calling this function
293 * Called with atchan->lock held and bh disabled
294 */
295static void atc_complete_all(struct at_dma_chan *atchan)
296{
297 struct at_desc *desc, *_desc;
298 LIST_HEAD(list);
299
300 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
301
302 BUG_ON(atc_chan_is_enabled(atchan));
303
304 /*
305 * Submit queued descriptors ASAP, i.e. before we go through
306 * the completed ones.
307 */
308 if (!list_empty(&atchan->queue))
309 atc_dostart(atchan, atc_first_queued(atchan));
310 /* empty active_list now it is completed */
311 list_splice_init(&atchan->active_list, &list);
312 /* empty queue list by moving descriptors (if any) to active_list */
313 list_splice_init(&atchan->queue, &atchan->active_list);
314
315 list_for_each_entry_safe(desc, _desc, &list, desc_node)
316 atc_chain_complete(atchan, desc);
317}
318
319/**
320 * atc_cleanup_descriptors - clean up finished descriptors in active_list
321 * @atchan: channel to be cleaned up
322 *
323 * Called with atchan->lock held and bh disabled
324 */
325static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
326{
327 struct at_desc *desc, *_desc;
328 struct at_desc *child;
329
330 dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
331
332 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
333 if (!(desc->lli.ctrla & ATC_DONE))
334 /* This one is currently in progress */
335 return;
336
337 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
338 if (!(child->lli.ctrla & ATC_DONE))
339 /* Currently in progress */
340 return;
341
342 /*
343 * No descriptors so far seem to be in progress, i.e.
344 * this chain must be done.
345 */
346 atc_chain_complete(atchan, desc);
347 }
348}
349
350/**
351 * atc_advance_work - at the end of a transaction, move forward
352 * @atchan: channel where the transaction ended
353 *
354 * Called with atchan->lock held and bh disabled
355 */
356static void atc_advance_work(struct at_dma_chan *atchan)
357{
358 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
359
360 if (list_empty(&atchan->active_list) ||
361 list_is_singular(&atchan->active_list)) {
362 atc_complete_all(atchan);
363 } else {
364 atc_chain_complete(atchan, atc_first_active(atchan));
365 /* advance work */
366 atc_dostart(atchan, atc_first_active(atchan));
367 }
368}
369
370
371/**
372 * atc_handle_error - handle errors reported by DMA controller
373 * @atchan: channel where error occurs
374 *
375 * Called with atchan->lock held and bh disabled
376 */
377static void atc_handle_error(struct at_dma_chan *atchan)
378{
379 struct at_desc *bad_desc;
380 struct at_desc *child;
381
382 /*
383 * The descriptor currently at the head of the active list is
384 * broken. Since we don't have any way to report errors, we'll
385 * just have to scream loudly and try to carry on.
386 */
387 bad_desc = atc_first_active(atchan);
388 list_del_init(&bad_desc->desc_node);
389
390 /* As the channel is stopped, take this opportunity to push queued
391 * descriptors onto the active_list */
392 list_splice_init(&atchan->queue, atchan->active_list.prev);
393
394 /* Try to restart the controller */
395 if (!list_empty(&atchan->active_list))
396 atc_dostart(atchan, atc_first_active(atchan));
397
398 /*
399 * KERN_CRITICAL may seem harsh, but since this only happens
400 * when someone submits a bad physical address in a
401 * descriptor, we should consider ourselves lucky that the
402 * controller flagged an error instead of scribbling over
403 * random memory locations.
404 */
405 dev_crit(chan2dev(&atchan->chan_common),
406 "Bad descriptor submitted for DMA!\n");
407 dev_crit(chan2dev(&atchan->chan_common),
408 " cookie: %d\n", bad_desc->txd.cookie);
409 atc_dump_lli(atchan, &bad_desc->lli);
410 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
411 atc_dump_lli(atchan, &child->lli);
412
413 /* Pretend the descriptor completed successfully */
414 atc_chain_complete(atchan, bad_desc);
415}
416
417
418/*-- IRQ & Tasklet ---------------------------------------------------*/
419
420static void atc_tasklet(unsigned long data)
421{
422 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
423
424 /* Channel cannot be enabled here */
425 if (atc_chan_is_enabled(atchan)) {
426 dev_err(chan2dev(&atchan->chan_common),
427 "BUG: channel enabled in tasklet\n");
428 return;
429 }
430
431 spin_lock(&atchan->lock);
432 if (test_and_clear_bit(0, &atchan->error_status))
433 atc_handle_error(atchan);
434 else
435 atc_advance_work(atchan);
436
437 spin_unlock(&atchan->lock);
438}
439
440static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
441{
442 struct at_dma *atdma = (struct at_dma *)dev_id;
443 struct at_dma_chan *atchan;
444 int i;
445 u32 status, pending, imr;
446 int ret = IRQ_NONE;
447
448 do {
449 imr = dma_readl(atdma, EBCIMR);
450 status = dma_readl(atdma, EBCISR);
451 pending = status & imr;
452
453 if (!pending)
454 break;
455
456 dev_vdbg(atdma->dma_common.dev,
457 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
458 status, imr, pending);
459
460 for (i = 0; i < atdma->dma_common.chancnt; i++) {
461 atchan = &atdma->chan[i];
462 if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
463 if (pending & AT_DMA_ERR(i)) {
464 /* Disable channel on AHB error */
465 dma_writel(atdma, CHDR, atchan->mask);
466 /* Give information to tasklet */
467 set_bit(0, &atchan->error_status);
468 }
469 tasklet_schedule(&atchan->tasklet);
470 ret = IRQ_HANDLED;
471 }
472 }
473
474 } while (pending);
475
476 return ret;
477}
478
479
480/*-- DMA Engine API --------------------------------------------------*/
481
482/**
483 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
484 * @desc: descriptor at the head of the transaction chain
485 *
486 * Queue chain if DMA engine is working already
487 *
488 * Cookie increment and adding to active_list or queue must be atomic
489 */
490static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
491{
492 struct at_desc *desc = txd_to_at_desc(tx);
493 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
494 dma_cookie_t cookie;
495
496 spin_lock_bh(&atchan->lock);
497 cookie = atc_assign_cookie(atchan, desc);
498
499 if (list_empty(&atchan->active_list)) {
500 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
501 desc->txd.cookie);
502 atc_dostart(atchan, desc);
503 list_add_tail(&desc->desc_node, &atchan->active_list);
504 } else {
505 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
506 desc->txd.cookie);
507 list_add_tail(&desc->desc_node, &atchan->queue);
508 }
509
510 spin_unlock_bh(&atchan->lock);
511
512 return cookie;
513}
514
515/**
516 * atc_prep_dma_memcpy - prepare a memcpy operation
517 * @chan: the channel to prepare operation on
518 * @dest: operation virtual destination address
519 * @src: operation virtual source address
520 * @len: operation length
521 * @flags: tx descriptor status flags
522 */
523static struct dma_async_tx_descriptor *
524atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
525 size_t len, unsigned long flags)
526{
527 struct at_dma_chan *atchan = to_at_dma_chan(chan);
528 struct at_desc *desc = NULL;
529 struct at_desc *first = NULL;
530 struct at_desc *prev = NULL;
531 size_t xfer_count;
532 size_t offset;
533 unsigned int src_width;
534 unsigned int dst_width;
535 u32 ctrla;
536 u32 ctrlb;
537
538 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
539 dest, src, len, flags);
540
541 if (unlikely(!len)) {
542 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
543 return NULL;
544 }
545
546 ctrla = ATC_DEFAULT_CTRLA;
547 ctrlb = ATC_DEFAULT_CTRLB
548 | ATC_SRC_ADDR_MODE_INCR
549 | ATC_DST_ADDR_MODE_INCR
550 | ATC_FC_MEM2MEM;
551
552 /*
553 * We can be a lot more clever here, but this should take care
554 * of the most common optimization.
555 */
556 if (!((src | dest | len) & 3)) {
557 ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
558 src_width = dst_width = 2;
559 } else if (!((src | dest | len) & 1)) {
560 ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
561 src_width = dst_width = 1;
562 } else {
563 ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
564 src_width = dst_width = 0;
565 }
566
567 for (offset = 0; offset < len; offset += xfer_count << src_width) {
568 xfer_count = min_t(size_t, (len - offset) >> src_width,
569 ATC_BTSIZE_MAX);
570
571 desc = atc_desc_get(atchan);
572 if (!desc)
573 goto err_desc_get;
574
575 desc->lli.saddr = src + offset;
576 desc->lli.daddr = dest + offset;
577 desc->lli.ctrla = ctrla | xfer_count;
578 desc->lli.ctrlb = ctrlb;
579
580 desc->txd.cookie = 0;
581 async_tx_ack(&desc->txd);
582
583 if (!first) {
584 first = desc;
585 } else {
586 /* inform the HW lli about chaining */
587 prev->lli.dscr = desc->txd.phys;
588 /* insert the link descriptor to the LD ring */
589 list_add_tail(&desc->desc_node,
590 &first->txd.tx_list);
591 }
592 prev = desc;
593 }
594
595 /* First descriptor of the chain embeds additional information */
596 first->txd.cookie = -EBUSY;
597 first->len = len;
598
599 /* set end-of-link on the last link descriptor of the list */
600 set_desc_eol(desc);
601
602 desc->txd.flags = flags; /* client is in control of this ack */
603
604 return &first->txd;
605
606err_desc_get:
607 atc_desc_put(atchan, first);
608 return NULL;
609}
610
611
612/**
613 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
614 * @chan: DMA channel
615 * @sgl: scatterlist to transfer to/from
616 * @sg_len: number of entries in @sgl
617 * @direction: DMA direction
618 * @flags: tx descriptor status flags
619 */
620static struct dma_async_tx_descriptor *
621atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
622 unsigned int sg_len, enum dma_data_direction direction,
623 unsigned long flags)
624{
625 struct at_dma_chan *atchan = to_at_dma_chan(chan);
626 struct at_dma_slave *atslave = chan->private;
627 struct at_desc *first = NULL;
628 struct at_desc *prev = NULL;
629 u32 ctrla;
630 u32 ctrlb;
631 dma_addr_t reg;
632 unsigned int reg_width;
633 unsigned int mem_width;
634 unsigned int i;
635 struct scatterlist *sg;
636 size_t total_len = 0;
637
638 dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
639 direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
640 flags);
641
642 if (unlikely(!atslave || !sg_len)) {
643 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
644 return NULL;
645 }
646
647 reg_width = atslave->reg_width;
648
649 sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
650
651 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
652 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
653
654 switch (direction) {
655 case DMA_TO_DEVICE:
656 ctrla |= ATC_DST_WIDTH(reg_width);
657 ctrlb |= ATC_DST_ADDR_MODE_FIXED
658 | ATC_SRC_ADDR_MODE_INCR
659 | ATC_FC_MEM2PER;
660 reg = atslave->tx_reg;
661 for_each_sg(sgl, sg, sg_len, i) {
662 struct at_desc *desc;
663 u32 len;
664 u32 mem;
665
666 desc = atc_desc_get(atchan);
667 if (!desc)
668 goto err_desc_get;
669
670 mem = sg_phys(sg);
671 len = sg_dma_len(sg);
672 mem_width = 2;
673 if (unlikely(mem & 3 || len & 3))
674 mem_width = 0;
675
676 desc->lli.saddr = mem;
677 desc->lli.daddr = reg;
678 desc->lli.ctrla = ctrla
679 | ATC_SRC_WIDTH(mem_width)
680 | len >> mem_width;
681 desc->lli.ctrlb = ctrlb;
682
683 if (!first) {
684 first = desc;
685 } else {
686 /* inform the HW lli about chaining */
687 prev->lli.dscr = desc->txd.phys;
688 /* insert the link descriptor to the LD ring */
689 list_add_tail(&desc->desc_node,
690 &first->txd.tx_list);
691 }
692 prev = desc;
693 total_len += len;
694 }
695 break;
696 case DMA_FROM_DEVICE:
697 ctrla |= ATC_SRC_WIDTH(reg_width);
698 ctrlb |= ATC_DST_ADDR_MODE_INCR
699 | ATC_SRC_ADDR_MODE_FIXED
700 | ATC_FC_PER2MEM;
701
702 reg = atslave->rx_reg;
703 for_each_sg(sgl, sg, sg_len, i) {
704 struct at_desc *desc;
705 u32 len;
706 u32 mem;
707
708 desc = atc_desc_get(atchan);
709 if (!desc)
710 goto err_desc_get;
711
712 mem = sg_phys(sg);
713 len = sg_dma_len(sg);
714 mem_width = 2;
715 if (unlikely(mem & 3 || len & 3))
716 mem_width = 0;
717
718 desc->lli.saddr = reg;
719 desc->lli.daddr = mem;
720 desc->lli.ctrla = ctrla
721 | ATC_DST_WIDTH(mem_width)
722 | len >> mem_width;
723 desc->lli.ctrlb = ctrlb;
724
725 if (!first) {
726 first = desc;
727 } else {
728 /* inform the HW lli about chaining */
729 prev->lli.dscr = desc->txd.phys;
730 /* insert the link descriptor to the LD ring */
731 list_add_tail(&desc->desc_node,
732 &first->txd.tx_list);
733 }
734 prev = desc;
735 total_len += len;
736 }
737 break;
738 default:
739 return NULL;
740 }
741
742 /* set end-of-link on the last link descriptor of the list */
743 set_desc_eol(prev);
744
746 /* First descriptor of the chain embeds additional information */
746 first->txd.cookie = -EBUSY;
747 first->len = total_len;
748
750 /* last link descriptor of the list is responsible for flags */
750 prev->txd.flags = flags; /* client is in control of this ack */
751
752 return &first->txd;
753
754err_desc_get:
755 dev_err(chan2dev(chan), "not enough descriptors available\n");
756 atc_desc_put(atchan, first);
757 return NULL;
758}
759
760static void atc_terminate_all(struct dma_chan *chan)
761{
762 struct at_dma_chan *atchan = to_at_dma_chan(chan);
763 struct at_dma *atdma = to_at_dma(chan->device);
764 struct at_desc *desc, *_desc;
765 LIST_HEAD(list);
766
767 /*
768 * This is only called when something went wrong elsewhere, so
769 * we don't really care about the data. Just disable the
770 * channel. We still have to poll the channel enable bit due
771 * to AHB/HSB limitations.
772 */
773 spin_lock_bh(&atchan->lock);
774
775 dma_writel(atdma, CHDR, atchan->mask);
776
777 /* confirm that this channel is disabled */
778 while (dma_readl(atdma, CHSR) & atchan->mask)
779 cpu_relax();
780
781 /* active_list entries will end up before queued entries */
782 list_splice_init(&atchan->queue, &list);
783 list_splice_init(&atchan->active_list, &list);
784
785 spin_unlock_bh(&atchan->lock);
786
787 /* Flush all pending and queued descriptors */
788 list_for_each_entry_safe(desc, _desc, &list, desc_node)
789 atc_chain_complete(atchan, desc);
790}
791
792/**
793 * atc_is_tx_complete - poll for transaction completion
794 * @chan: DMA channel
795 * @cookie: transaction identifier to check status of
796 * @done: if not %NULL, updated with last completed transaction
797 * @used: if not %NULL, updated with last used transaction
798 *
799 * If @done and @used are passed in, upon return they reflect the driver
800 * internal state and can be used with dma_async_is_complete() to check
801 * the status of multiple cookies without re-checking hardware state.
802 */
803static enum dma_status
804atc_is_tx_complete(struct dma_chan *chan,
805 dma_cookie_t cookie,
806 dma_cookie_t *done, dma_cookie_t *used)
807{
808 struct at_dma_chan *atchan = to_at_dma_chan(chan);
809 dma_cookie_t last_used;
810 dma_cookie_t last_complete;
811 enum dma_status ret;
812
813 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
814 cookie, done ? *done : 0, used ? *used : 0);
815
816 spin_lock_bh(&atchan->lock);
817
818 last_complete = atchan->completed_cookie;
819 last_used = chan->cookie;
820
821 ret = dma_async_is_complete(cookie, last_complete, last_used);
822 if (ret != DMA_SUCCESS) {
823 atc_cleanup_descriptors(atchan);
824
825 last_complete = atchan->completed_cookie;
826 last_used = chan->cookie;
827
828 ret = dma_async_is_complete(cookie, last_complete, last_used);
829 }
830
831 spin_unlock_bh(&atchan->lock);
832
833 if (done)
834 *done = last_complete;
835 if (used)
836 *used = last_used;
837
838 return ret;
839}
840
841/**
842 * atc_issue_pending - try to finish work
843 * @chan: target DMA channel
844 */
845static void atc_issue_pending(struct dma_chan *chan)
846{
847 struct at_dma_chan *atchan = to_at_dma_chan(chan);
848
849 dev_vdbg(chan2dev(chan), "issue_pending\n");
850
851 if (!atc_chan_is_enabled(atchan)) {
852 spin_lock_bh(&atchan->lock);
853 atc_advance_work(atchan);
854 spin_unlock_bh(&atchan->lock);
855 }
856}
857
858/**
859 * atc_alloc_chan_resources - allocate resources for DMA channel
860 * @chan: allocate descriptor resources for this channel
861 * @client: current client requesting the channel be ready for requests
862 *
863 * return - the number of allocated descriptors
864 */
865static int atc_alloc_chan_resources(struct dma_chan *chan)
866{
867 struct at_dma_chan *atchan = to_at_dma_chan(chan);
868 struct at_dma *atdma = to_at_dma(chan->device);
869 struct at_desc *desc;
870 struct at_dma_slave *atslave;
871 int i;
872 u32 cfg;
873 LIST_HEAD(tmp_list);
874
875 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
876
877 /* ASSERT: channel is idle */
878 if (atc_chan_is_enabled(atchan)) {
879 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
880 return -EIO;
881 }
882
883 cfg = ATC_DEFAULT_CFG;
884
885 atslave = chan->private;
886 if (atslave) {
887 /*
888 * We need controller-specific data to set up slave
889 * transfers.
890 */
891 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
892
893 /* if a cfg configuration is specified, take it instead of the default */
894 if (atslave->cfg)
895 cfg = atslave->cfg;
896 }
897
898 /* have we already been set up?
899 * reconfigure channel but no need to reallocate descriptors */
900 if (!list_empty(&atchan->free_list))
901 return atchan->descs_allocated;
902
903 /* Allocate initial pool of descriptors */
904 for (i = 0; i < init_nr_desc_per_channel; i++) {
905 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
906 if (!desc) {
907 dev_err(atdma->dma_common.dev,
908 "Only %d initial descriptors\n", i);
909 break;
910 }
911 list_add_tail(&desc->desc_node, &tmp_list);
912 }
913
914 spin_lock_bh(&atchan->lock);
915 atchan->descs_allocated = i;
916 list_splice(&tmp_list, &atchan->free_list);
917 atchan->completed_cookie = chan->cookie = 1;
918 spin_unlock_bh(&atchan->lock);
919
920 /* channel parameters */
921 channel_writel(atchan, CFG, cfg);
922
923 dev_dbg(chan2dev(chan),
924 "alloc_chan_resources: allocated %d descriptors\n",
925 atchan->descs_allocated);
926
927 return atchan->descs_allocated;
928}
929
930/**
931 * atc_free_chan_resources - free all channel resources
932 * @chan: DMA channel
933 */
934static void atc_free_chan_resources(struct dma_chan *chan)
935{
936 struct at_dma_chan *atchan = to_at_dma_chan(chan);
937 struct at_dma *atdma = to_at_dma(chan->device);
938 struct at_desc *desc, *_desc;
939 LIST_HEAD(list);
940
941 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
942 atchan->descs_allocated);
943
944 /* ASSERT: channel is idle */
945 BUG_ON(!list_empty(&atchan->active_list));
946 BUG_ON(!list_empty(&atchan->queue));
947 BUG_ON(atc_chan_is_enabled(atchan));
948
949 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
950 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
951 list_del(&desc->desc_node);
952 /* free link descriptor */
953 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
954 }
955 list_splice_init(&atchan->free_list, &list);
956 atchan->descs_allocated = 0;
957
958 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
959}
960
961
962/*-- Module Management -----------------------------------------------*/
963
964/**
965 * at_dma_off - disable DMA controller
966 * @atdma: the Atmel HDMAC device
967 */
968static void at_dma_off(struct at_dma *atdma)
969{
970 dma_writel(atdma, EN, 0);
971
972 /* disable all interrupts */
973 dma_writel(atdma, EBCIDR, -1L);
974
975 /* confirm that all channels are disabled */
976 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
977 cpu_relax();
978}
979
980static int __init at_dma_probe(struct platform_device *pdev)
981{
982 struct at_dma_platform_data *pdata;
983 struct resource *io;
984 struct at_dma *atdma;
985 size_t size;
986 int irq;
987 int err;
988 int i;
989
990 /* get DMA Controller parameters from platform */
991 pdata = pdev->dev.platform_data;
992 if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
993 return -EINVAL;
994
995 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
996 if (!io)
997 return -EINVAL;
998
999 irq = platform_get_irq(pdev, 0);
1000 if (irq < 0)
1001 return irq;
1002
1003 size = sizeof(struct at_dma);
1004 size += pdata->nr_channels * sizeof(struct at_dma_chan);
1005 atdma = kzalloc(size, GFP_KERNEL);
1006 if (!atdma)
1007 return -ENOMEM;
1008
1009 /* discover transaction capabilities from the platform data */
1010 atdma->dma_common.cap_mask = pdata->cap_mask;
1011 atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
1012
1013 size = io->end - io->start + 1;
1014 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1015 err = -EBUSY;
1016 goto err_kfree;
1017 }
1018
1019 atdma->regs = ioremap(io->start, size);
1020 if (!atdma->regs) {
1021 err = -ENOMEM;
1022 goto err_release_r;
1023 }
1024
1025 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1026 if (IS_ERR(atdma->clk)) {
1027 err = PTR_ERR(atdma->clk);
1028 goto err_clk;
1029 }
1030 clk_enable(atdma->clk);
1031
1032 /* force dma off, just in case */
1033 at_dma_off(atdma);
1034
1035 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1036 if (err)
1037 goto err_irq;
1038
1039 platform_set_drvdata(pdev, atdma);
1040
1041 /* create a pool of consistent memory blocks for hardware descriptors */
1042 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1043 &pdev->dev, sizeof(struct at_desc),
1044 4 /* word alignment */, 0);
1045 if (!atdma->dma_desc_pool) {
1046 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1047 err = -ENOMEM;
1048 goto err_pool_create;
1049 }
1050
1051 /* clear any pending interrupt */
1052 while (dma_readl(atdma, EBCISR))
1053 cpu_relax();
1054
1055 /* initialize channels related values */
1056 INIT_LIST_HEAD(&atdma->dma_common.channels);
1057 for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
1058 struct at_dma_chan *atchan = &atdma->chan[i];
1059
1060 atchan->chan_common.device = &atdma->dma_common;
1061 atchan->chan_common.cookie = atchan->completed_cookie = 1;
1062 atchan->chan_common.chan_id = i;
1063 list_add_tail(&atchan->chan_common.device_node,
1064 &atdma->dma_common.channels);
1065
1066 atchan->ch_regs = atdma->regs + ch_regs(i);
1067 spin_lock_init(&atchan->lock);
1068 atchan->mask = 1 << i;
1069
1070 INIT_LIST_HEAD(&atchan->active_list);
1071 INIT_LIST_HEAD(&atchan->queue);
1072 INIT_LIST_HEAD(&atchan->free_list);
1073
1074 tasklet_init(&atchan->tasklet, atc_tasklet,
1075 (unsigned long)atchan);
1076 atc_enable_irq(atchan);
1077 }
1078
1079 /* set base routines */
1080 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1081 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1082 atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
1083 atdma->dma_common.device_issue_pending = atc_issue_pending;
1084 atdma->dma_common.dev = &pdev->dev;
1085
1086 /* set prep routines based on capability */
1087 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1088 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1089
1090 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1091 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1092 atdma->dma_common.device_terminate_all = atc_terminate_all;
1093 }
1094
1095 dma_writel(atdma, EN, AT_DMA_ENABLE);
1096
1097 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1098 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1099 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1100 atdma->dma_common.chancnt);
1101
1102 dma_async_device_register(&atdma->dma_common);
1103
1104 return 0;
1105
1106err_pool_create:
1107 platform_set_drvdata(pdev, NULL);
1108 free_irq(platform_get_irq(pdev, 0), atdma);
1109err_irq:
1110 clk_disable(atdma->clk);
1111 clk_put(atdma->clk);
1112err_clk:
1113 iounmap(atdma->regs);
1114 atdma->regs = NULL;
1115err_release_r:
1116 release_mem_region(io->start, size);
1117err_kfree:
1118 kfree(atdma);
1119 return err;
1120}
1121
1122static int __exit at_dma_remove(struct platform_device *pdev)
1123{
1124 struct at_dma *atdma = platform_get_drvdata(pdev);
1125 struct dma_chan *chan, *_chan;
1126 struct resource *io;
1127
1128 at_dma_off(atdma);
1129 dma_async_device_unregister(&atdma->dma_common);
1130
1131 dma_pool_destroy(atdma->dma_desc_pool);
1132 platform_set_drvdata(pdev, NULL);
1133 free_irq(platform_get_irq(pdev, 0), atdma);
1134
1135 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1136 device_node) {
1137 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1138
1139 /* Disable interrupts */
1140 atc_disable_irq(atchan);
1141 tasklet_disable(&atchan->tasklet);
1142
1143 tasklet_kill(&atchan->tasklet);
1144 list_del(&chan->device_node);
1145 }
1146
1147 clk_disable(atdma->clk);
1148 clk_put(atdma->clk);
1149
1150 iounmap(atdma->regs);
1151 atdma->regs = NULL;
1152
1153 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1154 release_mem_region(io->start, io->end - io->start + 1);
1155
1156 kfree(atdma);
1157
1158 return 0;
1159}
1160
1161static void at_dma_shutdown(struct platform_device *pdev)
1162{
1163 struct at_dma *atdma = platform_get_drvdata(pdev);
1164
1165 at_dma_off(platform_get_drvdata(pdev));
1166 clk_disable(atdma->clk);
1167}
1168
1169static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1170{
1171 struct at_dma *atdma = platform_get_drvdata(pdev);
1172
1173 at_dma_off(platform_get_drvdata(pdev));
1174 clk_disable(atdma->clk);
1175 return 0;
1176}
1177
1178static int at_dma_resume_early(struct platform_device *pdev)
1179{
1180 struct at_dma *atdma = platform_get_drvdata(pdev);
1181
1182 clk_enable(atdma->clk);
1183 dma_writel(atdma, EN, AT_DMA_ENABLE);
1184 return 0;
1185
1186}
1187
1188static struct platform_driver at_dma_driver = {
1189 .remove = __exit_p(at_dma_remove),
1190 .shutdown = at_dma_shutdown,
1191 .suspend_late = at_dma_suspend_late,
1192 .resume_early = at_dma_resume_early,
1193 .driver = {
1194 .name = "at_hdmac",
1195 },
1196};
1197
1198static int __init at_dma_init(void)
1199{
1200 return platform_driver_probe(&at_dma_driver, at_dma_probe);
1201}
1202module_init(at_dma_init);
1203
1204static void __exit at_dma_exit(void)
1205{
1206 platform_driver_unregister(&at_dma_driver);
1207}
1208module_exit(at_dma_exit);
1209
1210MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1211MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1212MODULE_LICENSE("GPL");
1213MODULE_ALIAS("platform:at_hdmac");
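
The slave paths above (atc_alloc_chan_resources() and atc_prep_slave_sg()) expect a struct at_dma_slave handed over through chan->private. Its definition lives in <mach/at_hdmac.h>, which is not part of this diff; the field names used below (dma_dev, cfg, tx_reg, rx_reg, reg_width, ctrla) are inferred from how this file dereferences them, so treat this as a hedged sketch of how a peripheral driver might request a matching channel rather than a definitive API reference.

#include <linux/dmaengine.h>
#include <mach/at_hdmac.h>	/* struct at_dma_slave -- not part of this diff */

/* Accept only channels of the DMA controller named in the slave data. */
static bool example_at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl->dma_dev != chan->device->dev)
		return false;

	chan->private = sl;	/* later consumed by atc_alloc_chan_resources() */
	return true;
}

static struct dma_chan *example_request_slave_chan(struct at_dma_slave *sl)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* atc_prep_slave_sg() will pick up sl->tx_reg/rx_reg/reg_width/ctrla */
	return dma_request_channel(mask, example_at_dma_filter, sl);
}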
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
new file mode 100644
index 000000000000..4c972afc49ec
--- /dev/null
+++ b/drivers/dma/at_hdmac_regs.h
@@ -0,0 +1,353 @@
1/*
2 * Header file for the Atmel AHB DMA Controller driver
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef AT_HDMAC_REGS_H
12#define AT_HDMAC_REGS_H
13
14#include <mach/at_hdmac.h>
15
16#define AT_DMA_MAX_NR_CHANNELS 8
17
18
19#define AT_DMA_GCFG 0x00 /* Global Configuration Register */
20#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */
21#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. */
22#define AT_DMA_ARB_CFG_FIXED (0x0 << 4)
23#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4)
24
25#define AT_DMA_EN 0x04 /* Controller Enable Register */
26#define AT_DMA_ENABLE (0x1 << 0)
27
28#define AT_DMA_SREQ 0x08 /* Software Single Request Register */
29#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */
30#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */
31
32#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */
33#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */
34#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */
35
36#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */
37#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */
38#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */
39
40#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */
41#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */
42
43/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
44#define AT_DMA_EBCIER 0x18 /* Enable register */
45#define AT_DMA_EBCIDR 0x1C /* Disable register */
46#define AT_DMA_EBCIMR 0x20 /* Mask Register */
47#define AT_DMA_EBCISR 0x24 /* Status Register */
48#define AT_DMA_CBTC_OFFSET 8
49#define AT_DMA_ERR_OFFSET 16
50#define AT_DMA_BTC(x) (0x1 << (x))
51#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x)))
52#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x)))
53
54#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */
55#define AT_DMA_ENA(x) (0x1 << (x))
56#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x)))
57#define AT_DMA_KEEP(x) (0x1 << (24 + (x)))
58
59#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */
60#define AT_DMA_DIS(x) (0x1 << (x))
61#define AT_DMA_RES(x) (0x1 << ( 8 + (x)))
62
63#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */
64#define AT_DMA_EMPT(x) (0x1 << (16 + (x)))
65#define AT_DMA_STAL(x) (0x1 << (24 + (x)))
66
67
68#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */
69#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */
70
71/* Hardware register offset for each channel */
72#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */
73#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */
74#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */
75#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */
76#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */
77#define ATC_CFG_OFFSET 0x14 /* Configuration Register */
78#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */
79#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */
80
81
82/* Bitfield definitions */
83
84/* Bitfields in DSCR */
85#define ATC_DSCR_IF(i) (0x3 & (i)) /* Descriptor fetched via AHB-Lite Interface i */
86
87/* Bitfields in CTRLA */
88#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
89#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
90/* Chunk Transfer size definitions are in at_hdmac.h */
91#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
92#define ATC_SRC_WIDTH(x) ((x) << 24)
93#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
94#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
95#define ATC_SRC_WIDTH_WORD (0x2 << 24)
96#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
97#define ATC_DST_WIDTH(x) ((x) << 28)
98#define ATC_DST_WIDTH_BYTE (0x0 << 28)
99#define ATC_DST_WIDTH_HALFWORD (0x1 << 28)
100#define ATC_DST_WIDTH_WORD (0x2 << 28)
101#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */
102
103/* Bitfields in CTRLB */
104#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
105#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
106#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
107#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
108#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
109#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */
110#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */
111#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */
112#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */
113#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */
114#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */
115#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */
116#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */
117#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */
118#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */
119#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24)
120#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */
121#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */
122#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */
123#define ATC_DST_ADDR_MODE_MASK (0x3 << 28)
124#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */
125#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */
126#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */
127#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */
128#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */
129
130/* Bitfields in CFG */
131/* are in at_hdmac.h */
132
133/* Bitfields in SPIP */
134#define ATC_SPIP_HOLE(x) (0xFFFFU & (x))
135#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)
136
137/* Bitfields in DPIP */
138#define ATC_DPIP_HOLE(x) (0xFFFFU & (x))
139#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)
140
141
142/*-- descriptors -----------------------------------------------------*/
143
144/* LLI == Linked List Item; aka DMA buffer descriptor */
145struct at_lli {
146 /* values that are not changed by hardware */
147 dma_addr_t saddr;
148 dma_addr_t daddr;
149 /* value that may get written back: */
150 u32 ctrla;
151 /* more values that are not changed by hardware */
152 u32 ctrlb;
153 dma_addr_t dscr; /* chain to next lli */
154};
155
156/**
157 * struct at_desc - software descriptor
158 * @lli: hardware lli structure
159 * @txd: support for the async_tx api
160 * @desc_node: node on the channel descriptors list
161 * @len: total transaction bytecount
162 */
163struct at_desc {
164 /* FIRST values the hardware uses */
165 struct at_lli lli;
166
167 /* THEN values for driver housekeeping */
168 struct dma_async_tx_descriptor txd;
169 struct list_head desc_node;
170 size_t len;
171};
172
173static inline struct at_desc *
174txd_to_at_desc(struct dma_async_tx_descriptor *txd)
175{
176 return container_of(txd, struct at_desc, txd);
177}
178
179
180/*-- Channels --------------------------------------------------------*/
181
182/**
183 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
184 * @chan_common: common dmaengine channel object members
185 * @device: parent device
186 * @ch_regs: memory mapped register base
187 * @mask: channel index in a mask
188 * @error_status: transmit error status information from irq handler
189 * to tasklet (use atomic operations)
190 * @tasklet: bottom half to finish transaction work
191 * @lock: serializes enqueue/dequeue operations to descriptors lists
192 * @completed_cookie: identifier for the most recently completed operation
193 * @active_list: list of descriptors the dmaengine is currently running on
194 * @queue: list of descriptors ready to be submitted to engine
195 * @free_list: list of descriptors usable by the channel
196 * @descs_allocated: records the actual size of the descriptor pool
197 */
198struct at_dma_chan {
199 struct dma_chan chan_common;
200 struct at_dma *device;
201 void __iomem *ch_regs;
202 u8 mask;
203 unsigned long error_status;
204 struct tasklet_struct tasklet;
205
206 spinlock_t lock;
207
208 /* these other elements are all protected by lock */
209 dma_cookie_t completed_cookie;
210 struct list_head active_list;
211 struct list_head queue;
212 struct list_head free_list;
213 unsigned int descs_allocated;
214};
215
216#define channel_readl(atchan, name) \
217 __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)
218
219#define channel_writel(atchan, name, val) \
220 __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
221
222static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
223{
224 return container_of(dchan, struct at_dma_chan, chan_common);
225}
226
227
228/*-- Controller ------------------------------------------------------*/
229
230/**
231 * struct at_dma - internal representation of an Atmel HDMA Controller
232 * @dma_common: common dmaengine dma_device object members
233 * @regs: memory mapped register base
234 * @clk: dma controller clock
235 * @all_chan_mask: all channels available in a mask
236 * @dma_desc_pool: base of DMA descriptor region (DMA address)
237 * @chan: channels table to store at_dma_chan structures
238 */
239struct at_dma {
240 struct dma_device dma_common;
241 void __iomem *regs;
242 struct clk *clk;
243
244 u8 all_chan_mask;
245
246 struct dma_pool *dma_desc_pool;
247 /* AT THE END channels table */
248 struct at_dma_chan chan[0];
249};
250
251#define dma_readl(atdma, name) \
252 __raw_readl((atdma)->regs + AT_DMA_##name)
253#define dma_writel(atdma, name, val) \
254 __raw_writel((val), (atdma)->regs + AT_DMA_##name)
255
256static inline struct at_dma *to_at_dma(struct dma_device *ddev)
257{
258 return container_of(ddev, struct at_dma, dma_common);
259}
260
261
262/*-- Helper functions ------------------------------------------------*/
263
264static struct device *chan2dev(struct dma_chan *chan)
265{
266 return &chan->dev->device;
267}
268static struct device *chan2parent(struct dma_chan *chan)
269{
270 return chan->dev->device.parent;
271}
272
273#if defined(VERBOSE_DEBUG)
274static void vdbg_dump_regs(struct at_dma_chan *atchan)
275{
276 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
277
278 dev_err(chan2dev(&atchan->chan_common),
279 " channel %d : imr = 0x%x, chsr = 0x%x\n",
280 atchan->chan_common.chan_id,
281 dma_readl(atdma, EBCIMR),
282 dma_readl(atdma, CHSR));
283
284 dev_err(chan2dev(&atchan->chan_common),
285 " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
286 channel_readl(atchan, SADDR),
287 channel_readl(atchan, DADDR),
288 channel_readl(atchan, CTRLA),
289 channel_readl(atchan, CTRLB),
290 channel_readl(atchan, CFG),
291 channel_readl(atchan, DSCR));
292}
293#else
294static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
295#endif
296
297static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
298{
299 dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
300 " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
301 lli->saddr, lli->daddr,
302 lli->ctrla, lli->ctrlb, lli->dscr);
303}
304
305
306static void atc_setup_irq(struct at_dma_chan *atchan, int on)
307{
308 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
309 u32 ebci;
310
311 /* enable interrupts on buffer chain completion & error */
312 ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
313 | AT_DMA_ERR(atchan->chan_common.chan_id);
314 if (on)
315 dma_writel(atdma, EBCIER, ebci);
316 else
317 dma_writel(atdma, EBCIDR, ebci);
318}
319
320static inline void atc_enable_irq(struct at_dma_chan *atchan)
321{
322 atc_setup_irq(atchan, 1);
323}
324
325static inline void atc_disable_irq(struct at_dma_chan *atchan)
326{
327 atc_setup_irq(atchan, 0);
328}
329
330
331/**
332 * atc_chan_is_enabled - test if given channel is enabled
333 * @atchan: channel we want to test status
334 */
335static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
336{
337 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
338
339 return !!(dma_readl(atdma, CHSR) & atchan->mask);
340}
341
342
343/**
344 * set_desc_eol - set end-of-link to descriptor so it will end transfer
345 * @desc: descriptor, single or at the end of a chain, to end chain on
346 */
347static void set_desc_eol(struct at_desc *desc)
348{
349 desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
350 desc->lli.dscr = 0;
351}
352
353#endif /* AT_HDMAC_REGS_H */
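
As a quick illustration of the CTRLA/CTRLB encoding defined above, the hedged fragment below fills a single at_lli the way atc_prep_dma_memcpy() does for a word-aligned buffer and terminates the chain as set_desc_eol() would. It is a sketch for orientation only and assumes len is a multiple of 4 with len/4 no larger than ATC_BTSIZE_MAX.

#include "at_hdmac_regs.h"

/* Hedged sketch: encode one word-aligned mem-to-mem LLI (not part of this patch). */
static void example_fill_lli(struct at_lli *lli, dma_addr_t dst,
			     dma_addr_t src, size_t len)
{
	/* 32-bit source/destination accesses; BTSIZE is counted in words */
	lli->ctrla = ATC_SRC_WIDTH_WORD
			| ATC_DST_WIDTH_WORD
			| ATC_BTSIZE(len >> 2);

	/* mem-to-mem, both addresses incrementing, DMAC is the flow controller */
	lli->ctrlb = ATC_SIF(0) | ATC_DIF(1)
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_DST_ADDR_MODE_INCR
			| ATC_FC_MEM2MEM
			/* end of chain, as set_desc_eol() does */
			| ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;

	lli->saddr = src;
	lli->daddr = dst;
	lli->dscr  = 0;		/* no next descriptor */
}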
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index fb7da5141e96..d93017fc7872 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO);
 MODULE_PARM_DESC(max_channels,
 		"Maximum number of channels to use (default: all)");
 
+static unsigned int iterations;
+module_param(iterations, uint, S_IRUGO);
+MODULE_PARM_DESC(iterations,
+		"Iterations before stopping test (default: infinite)");
+
 static unsigned int xor_sources = 3;
 module_param(xor_sources, uint, S_IRUGO);
 MODULE_PARM_DESC(xor_sources,
@@ -114,7 +119,7 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
 		buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 		for ( ; i < start + len; i++)
 			buf[i] = PATTERN_SRC | PATTERN_COPY
-				| (~i & PATTERN_COUNT_MASK);;
+				| (~i & PATTERN_COUNT_MASK);
 		for ( ; i < test_buf_size; i++)
 			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 		buf++;
@@ -270,7 +275,8 @@ static int dmatest_func(void *data)
 
 	flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
 
-	while (!kthread_should_stop()) {
+	while (!kthread_should_stop()
+	       && !(iterations && total_tests >= iterations)) {
 		struct dma_device *dev = chan->device;
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
@@ -416,6 +422,13 @@ err_srcbuf:
 err_srcs:
 	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 			thread_name, total_tests, failed_tests, ret);
+
+	if (iterations > 0)
+		while (!kthread_should_stop()) {
+			DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+			interruptible_sleep_on(&wait_dmatest_exit);
+		}
+
 	return ret;
 }
 
@@ -495,11 +508,11 @@ static int dmatest_add_channel(struct dma_chan *chan)
 
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
 		cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
-		thread_count += cnt > 0 ?: 0;
+		thread_count += cnt > 0 ? cnt : 0;
 	}
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		cnt = dmatest_add_threads(dtc, DMA_XOR);
-		thread_count += cnt > 0 ?: 0;
+		thread_count += cnt > 0 ? cnt : 0;
 	}
 
 	pr_info("dmatest: Started %u threads using %s\n",
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f18d1bde0439..ef87a8984145 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -12,6 +12,11 @@
  * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
  * The support for MPC8349 DMA contorller is also added.
  *
+ * This driver instructs the DMA controller to issue the PCI Read Multiple
+ * command for PCI read operations, instead of using the default PCI Read Line
+ * command. Please be aware that this setting may result in read pre-fetching
+ * on some platforms.
+ *
  * This is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -49,9 +54,10 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 	case FSL_DMA_IP_83XX:
 		/* Set the channel to below modes:
 		 * EOTIE - End-of-transfer interrupt enable
+		 * PRC_RM - PCI read multiple
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
-				32);
+		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+				| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
 
@@ -136,15 +142,16 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 
 static void dma_start(struct fsl_dma_chan *fsl_chan)
 {
-	u32 mr_set = 0;;
+	u32 mr_set = 0;
 
 	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
 		mr_set |= FSL_DMA_MR_EMP_EN;
-	} else
+	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
 				& ~FSL_DMA_MR_EMP_EN, 32);
+	}
 
 	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
 		mr_set |= FSL_DMA_MR_EMS_EN;
@@ -871,9 +878,9 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 
 	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
-		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
 	case FSL_DMA_IP_83XX:
+		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
 		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
 	}
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4f21a512d848..dc7f26865797 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -38,6 +38,7 @@
38 38
39/* Special MR definition for MPC8349 */ 39/* Special MR definition for MPC8349 */
40#define FSL_DMA_MR_EOTIE 0x00000080 40#define FSL_DMA_MR_EOTIE 0x00000080
41#define FSL_DMA_MR_PRC_RM 0x00000800
41 42
42#define FSL_DMA_SR_CH 0x00000020 43#define FSL_DMA_SR_CH 0x00000020
43#define FSL_DMA_SR_PE 0x00000010 44#define FSL_DMA_SR_PE 0x00000010
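Taken together, the fsldma.c and fsldma.h hunks make dma_init() program the MPC83xx mode register with FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM, selecting the PCI Read Multiple command instead of the default PCI Read Line. As a hedged illustration of how the same bit could be flipped with the driver's existing DMA_IN/DMA_OUT accessors (the helper name below is hypothetical and not part of the patch):

/* Sketch only: read-modify-write of the 83xx mode register. */
static void fsl_chan_set_prc_rm(struct fsl_dma_chan *fsl_chan, bool enable)
{
	u32 mr = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	if (enable)
		mr |= FSL_DMA_MR_PRC_RM;	/* issue PCI Read Multiple */
	else
		mr &= ~FSL_DMA_MR_PRC_RM;	/* fall back to PCI Read Line */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mr, 32);
}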
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index ddab94f51224..3f23eabe09f2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1176,7 +1176,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1176 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) 1176 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1177 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; 1177 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1178 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1178 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1179 dma_dev->max_xor = 8; ; 1179 dma_dev->max_xor = 8;
1180 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; 1180 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1181 } 1181 }
1182 1182
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
new file mode 100644
index 000000000000..88dab52926f4
--- /dev/null
+++ b/drivers/dma/txx9dmac.c
@@ -0,0 +1,1358 @@
1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/scatterlist.h>
18#include "txx9dmac.h"
19
20static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
21{
22 return container_of(chan, struct txx9dmac_chan, chan);
23}
24
25static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
26{
27 return dc->ch_regs;
28}
29
30static struct txx9dmac_cregs32 __iomem *__dma_regs32(
31 const struct txx9dmac_chan *dc)
32{
33 return dc->ch_regs;
34}
35
36#define channel64_readq(dc, name) \
37 __raw_readq(&(__dma_regs(dc)->name))
38#define channel64_writeq(dc, name, val) \
39 __raw_writeq((val), &(__dma_regs(dc)->name))
40#define channel64_readl(dc, name) \
41 __raw_readl(&(__dma_regs(dc)->name))
42#define channel64_writel(dc, name, val) \
43 __raw_writel((val), &(__dma_regs(dc)->name))
44
45#define channel32_readl(dc, name) \
46 __raw_readl(&(__dma_regs32(dc)->name))
47#define channel32_writel(dc, name, val) \
48 __raw_writel((val), &(__dma_regs32(dc)->name))
49
50#define channel_readq(dc, name) channel64_readq(dc, name)
51#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
52#define channel_readl(dc, name) \
53 (is_dmac64(dc) ? \
54 channel64_readl(dc, name) : channel32_readl(dc, name))
55#define channel_writel(dc, name, val) \
56 (is_dmac64(dc) ? \
57 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
58
59static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
60{
61 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
62 return channel64_readq(dc, CHAR);
63 else
64 return channel64_readl(dc, CHAR);
65}
66
67static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
68{
69 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
70 channel64_writeq(dc, CHAR, val);
71 else
72 channel64_writel(dc, CHAR, val);
73}
74
75static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
76{
77#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
78 channel64_writel(dc, CHAR, 0);
79 channel64_writel(dc, __pad_CHAR, 0);
80#else
81 channel64_writeq(dc, CHAR, 0);
82#endif
83}
84
85static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
86{
87 if (is_dmac64(dc))
88 return channel64_read_CHAR(dc);
89 else
90 return channel32_readl(dc, CHAR);
91}
92
93static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
94{
95 if (is_dmac64(dc))
96 channel64_write_CHAR(dc, val);
97 else
98 channel32_writel(dc, CHAR, val);
99}
100
101static struct txx9dmac_regs __iomem *__txx9dmac_regs(
102 const struct txx9dmac_dev *ddev)
103{
104 return ddev->regs;
105}
106
107static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
108 const struct txx9dmac_dev *ddev)
109{
110 return ddev->regs;
111}
112
113#define dma64_readl(ddev, name) \
114 __raw_readl(&(__txx9dmac_regs(ddev)->name))
115#define dma64_writel(ddev, name, val) \
116 __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
117
118#define dma32_readl(ddev, name) \
119 __raw_readl(&(__txx9dmac_regs32(ddev)->name))
120#define dma32_writel(ddev, name, val) \
121 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
122
123#define dma_readl(ddev, name) \
124 (__is_dmac64(ddev) ? \
125 dma64_readl(ddev, name) : dma32_readl(ddev, name))
126#define dma_writel(ddev, name, val) \
127 (__is_dmac64(ddev) ? \
128 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
129
130static struct device *chan2dev(struct dma_chan *chan)
131{
132 return &chan->dev->device;
133}
134static struct device *chan2parent(struct dma_chan *chan)
135{
136 return chan->dev->device.parent;
137}
138
139static struct txx9dmac_desc *
140txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
141{
142 return container_of(txd, struct txx9dmac_desc, txd);
143}
144
145static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
146 const struct txx9dmac_desc *desc)
147{
148 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
149}
150
151static void desc_write_CHAR(const struct txx9dmac_chan *dc,
152 struct txx9dmac_desc *desc, dma_addr_t val)
153{
154 if (is_dmac64(dc))
155 desc->hwdesc.CHAR = val;
156 else
157 desc->hwdesc32.CHAR = val;
158}
159
160#define TXX9_DMA_MAX_COUNT 0x04000000
161
162#define TXX9_DMA_INITIAL_DESC_COUNT 64
163
164static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
165{
166 return list_entry(dc->active_list.next,
167 struct txx9dmac_desc, desc_node);
168}
169
170static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
171{
172 return list_entry(dc->active_list.prev,
173 struct txx9dmac_desc, desc_node);
174}
175
176static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
177{
178 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
179}
180
181static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
182{
183 if (!list_empty(&desc->txd.tx_list))
184 desc = list_entry(desc->txd.tx_list.prev,
185 struct txx9dmac_desc, desc_node);
186 return desc;
187}
188
189static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
190
191static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
192 gfp_t flags)
193{
194 struct txx9dmac_dev *ddev = dc->ddev;
195 struct txx9dmac_desc *desc;
196
197 desc = kzalloc(sizeof(*desc), flags);
198 if (!desc)
199 return NULL;
200 dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
201 desc->txd.tx_submit = txx9dmac_tx_submit;
202 /* txd.flags will be overwritten in prep funcs */
203 desc->txd.flags = DMA_CTRL_ACK;
204 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
205 ddev->descsize, DMA_TO_DEVICE);
206 return desc;
207}
208
209static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
210{
211 struct txx9dmac_desc *desc, *_desc;
212 struct txx9dmac_desc *ret = NULL;
213 unsigned int i = 0;
214
215 spin_lock_bh(&dc->lock);
216 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
217 if (async_tx_test_ack(&desc->txd)) {
218 list_del(&desc->desc_node);
219 ret = desc;
220 break;
221 }
222 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
223 i++;
224 }
225 spin_unlock_bh(&dc->lock);
226
227 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
228 i);
229 if (!ret) {
230 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
231 if (ret) {
232 spin_lock_bh(&dc->lock);
233 dc->descs_allocated++;
234 spin_unlock_bh(&dc->lock);
235 } else
236 dev_err(chan2dev(&dc->chan),
237 "not enough descriptors available\n");
238 }
239 return ret;
240}
241
242static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
243 struct txx9dmac_desc *desc)
244{
245 struct txx9dmac_dev *ddev = dc->ddev;
246 struct txx9dmac_desc *child;
247
248 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
249 dma_sync_single_for_cpu(chan2parent(&dc->chan),
250 child->txd.phys, ddev->descsize,
251 DMA_TO_DEVICE);
252 dma_sync_single_for_cpu(chan2parent(&dc->chan),
253 desc->txd.phys, ddev->descsize,
254 DMA_TO_DEVICE);
255}
256
257/*
258 * Move a descriptor, including any children, to the free list.
259 * `desc' must not be on any lists.
260 */
261static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
262 struct txx9dmac_desc *desc)
263{
264 if (desc) {
265 struct txx9dmac_desc *child;
266
267 txx9dmac_sync_desc_for_cpu(dc, desc);
268
269 spin_lock_bh(&dc->lock);
270 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
271 dev_vdbg(chan2dev(&dc->chan),
272 "moving child desc %p to freelist\n",
273 child);
274 list_splice_init(&desc->txd.tx_list, &dc->free_list);
275 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
276 desc);
277 list_add(&desc->desc_node, &dc->free_list);
278 spin_unlock_bh(&dc->lock);
279 }
280}
281
282/* Called with dc->lock held and bh disabled */
283static dma_cookie_t
284txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
285{
286 dma_cookie_t cookie = dc->chan.cookie;
287
288 if (++cookie < 0)
289 cookie = 1;
290
291 dc->chan.cookie = cookie;
292 desc->txd.cookie = cookie;
293
294 return cookie;
295}
296
297/*----------------------------------------------------------------------*/
298
299static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
300{
301 if (is_dmac64(dc))
302 dev_err(chan2dev(&dc->chan),
303 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
304 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
305 (u64)channel64_read_CHAR(dc),
306 channel64_readq(dc, SAR),
307 channel64_readq(dc, DAR),
308 channel64_readl(dc, CNTR),
309 channel64_readl(dc, SAIR),
310 channel64_readl(dc, DAIR),
311 channel64_readl(dc, CCR),
312 channel64_readl(dc, CSR));
313 else
314 dev_err(chan2dev(&dc->chan),
315 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
316 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
317 channel32_readl(dc, CHAR),
318 channel32_readl(dc, SAR),
319 channel32_readl(dc, DAR),
320 channel32_readl(dc, CNTR),
321 channel32_readl(dc, SAIR),
322 channel32_readl(dc, DAIR),
323 channel32_readl(dc, CCR),
324 channel32_readl(dc, CSR));
325}
326
327static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
328{
329 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
330 if (is_dmac64(dc)) {
331 channel64_clear_CHAR(dc);
332 channel_writeq(dc, SAR, 0);
333 channel_writeq(dc, DAR, 0);
334 } else {
335 channel_writel(dc, CHAR, 0);
336 channel_writel(dc, SAR, 0);
337 channel_writel(dc, DAR, 0);
338 }
339 channel_writel(dc, CNTR, 0);
340 channel_writel(dc, SAIR, 0);
341 channel_writel(dc, DAIR, 0);
342 channel_writel(dc, CCR, 0);
343 mmiowb();
344}
345
346/* Called with dc->lock held and bh disabled */
347static void txx9dmac_dostart(struct txx9dmac_chan *dc,
348 struct txx9dmac_desc *first)
349{
350 struct txx9dmac_slave *ds = dc->chan.private;
351 u32 sai, dai;
352
353 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
354 first->txd.cookie, first);
355 /* ASSERT: channel is idle */
356 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
357 dev_err(chan2dev(&dc->chan),
358 "BUG: Attempted to start non-idle channel\n");
359 txx9dmac_dump_regs(dc);
360 /* The tasklet will hopefully advance the queue... */
361 return;
362 }
363
364 if (is_dmac64(dc)) {
365 channel64_writel(dc, CNTR, 0);
366 channel64_writel(dc, CSR, 0xffffffff);
367 if (ds) {
368 if (ds->tx_reg) {
369 sai = ds->reg_width;
370 dai = 0;
371 } else {
372 sai = 0;
373 dai = ds->reg_width;
374 }
375 } else {
376 sai = 8;
377 dai = 8;
378 }
379 channel64_writel(dc, SAIR, sai);
380 channel64_writel(dc, DAIR, dai);
381 /* All 64-bit DMACs support SMPCHN */
382 channel64_writel(dc, CCR, dc->ccr);
383 /* Writing a non-zero value to CHAR will assert XFACT */
384 channel64_write_CHAR(dc, first->txd.phys);
385 } else {
386 channel32_writel(dc, CNTR, 0);
387 channel32_writel(dc, CSR, 0xffffffff);
388 if (ds) {
389 if (ds->tx_reg) {
390 sai = ds->reg_width;
391 dai = 0;
392 } else {
393 sai = 0;
394 dai = ds->reg_width;
395 }
396 } else {
397 sai = 4;
398 dai = 4;
399 }
400 channel32_writel(dc, SAIR, sai);
401 channel32_writel(dc, DAIR, dai);
402 if (txx9_dma_have_SMPCHN()) {
403 channel32_writel(dc, CCR, dc->ccr);
404 /* Writing a non-zero value to CHAR will assert XFACT */
405 channel32_writel(dc, CHAR, first->txd.phys);
406 } else {
407 channel32_writel(dc, CHAR, first->txd.phys);
408 channel32_writel(dc, CCR, dc->ccr);
409 }
410 }
411}
412
413/*----------------------------------------------------------------------*/
414
415static void
416txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
417 struct txx9dmac_desc *desc)
418{
419 dma_async_tx_callback callback;
420 void *param;
421 struct dma_async_tx_descriptor *txd = &desc->txd;
422 struct txx9dmac_slave *ds = dc->chan.private;
423
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc);
426
427 dc->completed = txd->cookie;
428 callback = txd->callback;
429 param = txd->callback_param;
430
431 txx9dmac_sync_desc_for_cpu(dc, desc);
432 list_splice_init(&txd->tx_list, &dc->free_list);
433 list_move(&desc->desc_node, &dc->free_list);
434
435 if (!ds) {
436 dma_addr_t dmaaddr;
437 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
438 dmaaddr = is_dmac64(dc) ?
439 desc->hwdesc.DAR : desc->hwdesc32.DAR;
440 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
441 dma_unmap_single(chan2parent(&dc->chan),
442 dmaaddr, desc->len, DMA_FROM_DEVICE);
443 else
444 dma_unmap_page(chan2parent(&dc->chan),
445 dmaaddr, desc->len, DMA_FROM_DEVICE);
446 }
447 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
448 dmaaddr = is_dmac64(dc) ?
449 desc->hwdesc.SAR : desc->hwdesc32.SAR;
450 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
451 dma_unmap_single(chan2parent(&dc->chan),
452 dmaaddr, desc->len, DMA_TO_DEVICE);
453 else
454 dma_unmap_page(chan2parent(&dc->chan),
455 dmaaddr, desc->len, DMA_TO_DEVICE);
456 }
457 }
458
459 /*
460 * The API requires that no submissions are done from a
461 * callback, so we don't need to drop the lock here
462 */
463 if (callback)
464 callback(param);
465 dma_run_dependencies(txd);
466}
467
468static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
469{
470 struct txx9dmac_dev *ddev = dc->ddev;
471 struct txx9dmac_desc *desc;
472 struct txx9dmac_desc *prev = NULL;
473
474 BUG_ON(!list_empty(list));
475 do {
476 desc = txx9dmac_first_queued(dc);
477 if (prev) {
478 desc_write_CHAR(dc, prev, desc->txd.phys);
479 dma_sync_single_for_device(chan2parent(&dc->chan),
480 prev->txd.phys, ddev->descsize,
481 DMA_TO_DEVICE);
482 }
483 prev = txx9dmac_last_child(desc);
484 list_move_tail(&desc->desc_node, list);
485 /* Make chain-completion interrupt happen */
486 if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
487 !txx9dmac_chan_INTENT(dc))
488 break;
489 } while (!list_empty(&dc->queue));
490}
491
492static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
493{
494 struct txx9dmac_desc *desc, *_desc;
495 LIST_HEAD(list);
496
497 /*
498 * Submit queued descriptors ASAP, i.e. before we go through
499 * the completed ones.
500 */
501 list_splice_init(&dc->active_list, &list);
502 if (!list_empty(&dc->queue)) {
503 txx9dmac_dequeue(dc, &dc->active_list);
504 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
505 }
506
507 list_for_each_entry_safe(desc, _desc, &list, desc_node)
508 txx9dmac_descriptor_complete(dc, desc);
509}
510
511static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
512 struct txx9dmac_hwdesc *desc)
513{
514 if (is_dmac64(dc)) {
515#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
516 dev_crit(chan2dev(&dc->chan),
517 " desc: ch%#llx s%#llx d%#llx c%#x\n",
518 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
519#else
520 dev_crit(chan2dev(&dc->chan),
521 " desc: ch%#llx s%#llx d%#llx c%#x"
522 " si%#x di%#x cc%#x cs%#x\n",
523 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
524 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
525#endif
526 } else {
527 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
528#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
529 dev_crit(chan2dev(&dc->chan),
530 " desc: ch%#x s%#x d%#x c%#x\n",
531 d->CHAR, d->SAR, d->DAR, d->CNTR);
532#else
533 dev_crit(chan2dev(&dc->chan),
534 " desc: ch%#x s%#x d%#x c%#x"
535 " si%#x di%#x cc%#x cs%#x\n",
536 d->CHAR, d->SAR, d->DAR, d->CNTR,
537 d->SAIR, d->DAIR, d->CCR, d->CSR);
538#endif
539 }
540}
541
542static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
543{
544 struct txx9dmac_desc *bad_desc;
545 struct txx9dmac_desc *child;
546 u32 errors;
547
548 /*
549 * The descriptor currently at the head of the active list is
550 * borked. Since we don't have any way to report errors, we'll
551 * just have to scream loudly and try to carry on.
552 */
553 dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
554 txx9dmac_dump_regs(dc);
555
556 bad_desc = txx9dmac_first_active(dc);
557 list_del_init(&bad_desc->desc_node);
558
559 /* Clear all error flags and try to restart the controller */
560 errors = csr & (TXX9_DMA_CSR_ABCHC |
561 TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
562 TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
563 channel_writel(dc, CSR, errors);
564
565 if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
566 txx9dmac_dequeue(dc, &dc->active_list);
567 if (!list_empty(&dc->active_list))
568 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
569
570 dev_crit(chan2dev(&dc->chan),
571 "Bad descriptor submitted for DMA! (cookie: %d)\n",
572 bad_desc->txd.cookie);
573 txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
574 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
575 txx9dmac_dump_desc(dc, &child->hwdesc);
576 /* Pretend the descriptor completed successfully */
577 txx9dmac_descriptor_complete(dc, bad_desc);
578}
579
580static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
581{
582 dma_addr_t chain;
583 struct txx9dmac_desc *desc, *_desc;
584 struct txx9dmac_desc *child;
585 u32 csr;
586
587 if (is_dmac64(dc)) {
588 chain = channel64_read_CHAR(dc);
589 csr = channel64_readl(dc, CSR);
590 channel64_writel(dc, CSR, csr);
591 } else {
592 chain = channel32_readl(dc, CHAR);
593 csr = channel32_readl(dc, CSR);
594 channel32_writel(dc, CSR, csr);
595 }
596 /* For a dynamic chain, we should look at XFACT instead of NCHNC */
597 if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
598 /* Everything we've submitted is done */
599 txx9dmac_complete_all(dc);
600 return;
601 }
602 if (!(csr & TXX9_DMA_CSR_CHNEN))
603 chain = 0; /* last descriptor of this chain */
604
605 dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
606 (u64)chain);
607
608 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
609 if (desc_read_CHAR(dc, desc) == chain) {
610 /* This one is currently in progress */
611 if (csr & TXX9_DMA_CSR_ABCHC)
612 goto scan_done;
613 return;
614 }
615
616 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
617 if (desc_read_CHAR(dc, child) == chain) {
618 /* Currently in progress */
619 if (csr & TXX9_DMA_CSR_ABCHC)
620 goto scan_done;
621 return;
622 }
623
624 /*
625 * No descriptors so far seem to be in progress, i.e.
626 * this one must be done.
627 */
628 txx9dmac_descriptor_complete(dc, desc);
629 }
630scan_done:
631 if (csr & TXX9_DMA_CSR_ABCHC) {
632 txx9dmac_handle_error(dc, csr);
633 return;
634 }
635
636 dev_err(chan2dev(&dc->chan),
637 "BUG: All descriptors done, but channel not idle!\n");
638
639 /* Try to continue after resetting the channel... */
640 txx9dmac_reset_chan(dc);
641
642 if (!list_empty(&dc->queue)) {
643 txx9dmac_dequeue(dc, &dc->active_list);
644 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
645 }
646}
647
648static void txx9dmac_chan_tasklet(unsigned long data)
649{
650 int irq;
651 u32 csr;
652 struct txx9dmac_chan *dc;
653
654 dc = (struct txx9dmac_chan *)data;
655 csr = channel_readl(dc, CSR);
656 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
657
658 spin_lock(&dc->lock);
659 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
660 TXX9_DMA_CSR_NTRNFC))
661 txx9dmac_scan_descriptors(dc);
662 spin_unlock(&dc->lock);
663 irq = dc->irq;
664
665 enable_irq(irq);
666}
667
668static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
669{
670 struct txx9dmac_chan *dc = dev_id;
671
672 dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
673 channel_readl(dc, CSR));
674
675 tasklet_schedule(&dc->tasklet);
676 /*
677 * Just disable the interrupts. We'll turn them back on in the
678 * softirq handler.
679 */
680 disable_irq_nosync(irq);
681
682 return IRQ_HANDLED;
683}
684
685static void txx9dmac_tasklet(unsigned long data)
686{
687 int irq;
688 u32 csr;
689 struct txx9dmac_chan *dc;
690
691 struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
692 u32 mcr;
693 int i;
694
695 mcr = dma_readl(ddev, MCR);
696 dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
697 for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
698 if ((mcr >> (24 + i)) & 0x11) {
699 dc = ddev->chan[i];
700 csr = channel_readl(dc, CSR);
701 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
702 csr);
703 spin_lock(&dc->lock);
704 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
705 TXX9_DMA_CSR_NTRNFC))
706 txx9dmac_scan_descriptors(dc);
707 spin_unlock(&dc->lock);
708 }
709 }
710 irq = ddev->irq;
711
712 enable_irq(irq);
713}
714
715static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
716{
717 struct txx9dmac_dev *ddev = dev_id;
718
719 dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
720 dma_readl(ddev, MCR));
721
722 tasklet_schedule(&ddev->tasklet);
723 /*
724 * Just disable the interrupts. We'll turn them back on in the
725 * softirq handler.
726 */
727 disable_irq_nosync(irq);
728
729 return IRQ_HANDLED;
730}
731
732/*----------------------------------------------------------------------*/
733
734static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
735{
736 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
737 struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
738 dma_cookie_t cookie;
739
740 spin_lock_bh(&dc->lock);
741 cookie = txx9dmac_assign_cookie(dc, desc);
742
743 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
744 desc->txd.cookie, desc);
745
746 list_add_tail(&desc->desc_node, &dc->queue);
747 spin_unlock_bh(&dc->lock);
748
749 return cookie;
750}
751
752static struct dma_async_tx_descriptor *
753txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
754 size_t len, unsigned long flags)
755{
756 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
757 struct txx9dmac_dev *ddev = dc->ddev;
758 struct txx9dmac_desc *desc;
759 struct txx9dmac_desc *first;
760 struct txx9dmac_desc *prev;
761 size_t xfer_count;
762 size_t offset;
763
764 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
765 (u64)dest, (u64)src, len, flags);
766
767 if (unlikely(!len)) {
768 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
769 return NULL;
770 }
771
772 prev = first = NULL;
773
774 for (offset = 0; offset < len; offset += xfer_count) {
775 xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
776 /*
777 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
778 * ERT-TX49H4-016 (slightly conservative)
779 */
780 if (__is_dmac64(ddev)) {
781 if (xfer_count > 0x100 &&
782 (xfer_count & 0xff) >= 0xfa &&
783 (xfer_count & 0xff) <= 0xff)
784 xfer_count -= 0x20;
785 } else {
786 if (xfer_count > 0x80 &&
787 (xfer_count & 0x7f) >= 0x7e &&
788 (xfer_count & 0x7f) <= 0x7f)
789 xfer_count -= 0x20;
790 }
791
792 desc = txx9dmac_desc_get(dc);
793 if (!desc) {
794 txx9dmac_desc_put(dc, first);
795 return NULL;
796 }
797
798 if (__is_dmac64(ddev)) {
799 desc->hwdesc.SAR = src + offset;
800 desc->hwdesc.DAR = dest + offset;
801 desc->hwdesc.CNTR = xfer_count;
802 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
803 dc->ccr | TXX9_DMA_CCR_XFACT);
804 } else {
805 desc->hwdesc32.SAR = src + offset;
806 desc->hwdesc32.DAR = dest + offset;
807 desc->hwdesc32.CNTR = xfer_count;
808 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
809 dc->ccr | TXX9_DMA_CCR_XFACT);
810 }
811
812 /*
813 * The descriptors on tx_list are not reachable from
814 * the dc->queue list or dc->active_list after a
815 * submit. If we put all descriptors on active_list,
816 * invoking the callback on completion would be more
817 * complex.
818 */
819 if (!first) {
820 first = desc;
821 } else {
822 desc_write_CHAR(dc, prev, desc->txd.phys);
823 dma_sync_single_for_device(chan2parent(&dc->chan),
824 prev->txd.phys, ddev->descsize,
825 DMA_TO_DEVICE);
826 list_add_tail(&desc->desc_node,
827 &first->txd.tx_list);
828 }
829 prev = desc;
830 }
831
832 /* Trigger interrupt after last block */
833 if (flags & DMA_PREP_INTERRUPT)
834 txx9dmac_desc_set_INTENT(ddev, prev);
835
836 desc_write_CHAR(dc, prev, 0);
837 dma_sync_single_for_device(chan2parent(&dc->chan),
838 prev->txd.phys, ddev->descsize,
839 DMA_TO_DEVICE);
840
841 first->txd.flags = flags;
842 first->len = len;
843
844 return &first->txd;
845}
846
847static struct dma_async_tx_descriptor *
848txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
849 unsigned int sg_len, enum dma_data_direction direction,
850 unsigned long flags)
851{
852 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
853 struct txx9dmac_dev *ddev = dc->ddev;
854 struct txx9dmac_slave *ds = chan->private;
855 struct txx9dmac_desc *prev;
856 struct txx9dmac_desc *first;
857 unsigned int i;
858 struct scatterlist *sg;
859
860 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
861
862 BUG_ON(!ds || !ds->reg_width);
863 if (ds->tx_reg)
864 BUG_ON(direction != DMA_TO_DEVICE);
865 else
866 BUG_ON(direction != DMA_FROM_DEVICE);
867 if (unlikely(!sg_len))
868 return NULL;
869
870 prev = first = NULL;
871
872 for_each_sg(sgl, sg, sg_len, i) {
873 struct txx9dmac_desc *desc;
874 dma_addr_t mem;
875 u32 sai, dai;
876
877 desc = txx9dmac_desc_get(dc);
878 if (!desc) {
879 txx9dmac_desc_put(dc, first);
880 return NULL;
881 }
882
883 mem = sg_dma_address(sg);
884
885 if (__is_dmac64(ddev)) {
886 if (direction == DMA_TO_DEVICE) {
887 desc->hwdesc.SAR = mem;
888 desc->hwdesc.DAR = ds->tx_reg;
889 } else {
890 desc->hwdesc.SAR = ds->rx_reg;
891 desc->hwdesc.DAR = mem;
892 }
893 desc->hwdesc.CNTR = sg_dma_len(sg);
894 } else {
895 if (direction == DMA_TO_DEVICE) {
896 desc->hwdesc32.SAR = mem;
897 desc->hwdesc32.DAR = ds->tx_reg;
898 } else {
899 desc->hwdesc32.SAR = ds->rx_reg;
900 desc->hwdesc32.DAR = mem;
901 }
902 desc->hwdesc32.CNTR = sg_dma_len(sg);
903 }
904 if (direction == DMA_TO_DEVICE) {
905 sai = ds->reg_width;
906 dai = 0;
907 } else {
908 sai = 0;
909 dai = ds->reg_width;
910 }
911 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
912 dc->ccr | TXX9_DMA_CCR_XFACT);
913
914 if (!first) {
915 first = desc;
916 } else {
917 desc_write_CHAR(dc, prev, desc->txd.phys);
918 dma_sync_single_for_device(chan2parent(&dc->chan),
919 prev->txd.phys,
920 ddev->descsize,
921 DMA_TO_DEVICE);
922 list_add_tail(&desc->desc_node,
923 &first->txd.tx_list);
924 }
925 prev = desc;
926 }
927
928 /* Trigger interrupt after last block */
929 if (flags & DMA_PREP_INTERRUPT)
930 txx9dmac_desc_set_INTENT(ddev, prev);
931
932 desc_write_CHAR(dc, prev, 0);
933 dma_sync_single_for_device(chan2parent(&dc->chan),
934 prev->txd.phys, ddev->descsize,
935 DMA_TO_DEVICE);
936
937 first->txd.flags = flags;
938 first->len = 0;
939
940 return &first->txd;
941}
942
943static void txx9dmac_terminate_all(struct dma_chan *chan)
944{
945 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
946 struct txx9dmac_desc *desc, *_desc;
947 LIST_HEAD(list);
948
949 dev_vdbg(chan2dev(chan), "terminate_all\n");
950 spin_lock_bh(&dc->lock);
951
952 txx9dmac_reset_chan(dc);
953
954 /* active_list entries will end up before queued entries */
955 list_splice_init(&dc->queue, &list);
956 list_splice_init(&dc->active_list, &list);
957
958 spin_unlock_bh(&dc->lock);
959
960 /* Flush all pending and queued descriptors */
961 list_for_each_entry_safe(desc, _desc, &list, desc_node)
962 txx9dmac_descriptor_complete(dc, desc);
963}
964
965static enum dma_status
966txx9dmac_is_tx_complete(struct dma_chan *chan,
967 dma_cookie_t cookie,
968 dma_cookie_t *done, dma_cookie_t *used)
969{
970 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
971 dma_cookie_t last_used;
972 dma_cookie_t last_complete;
973 int ret;
974
975 last_complete = dc->completed;
976 last_used = chan->cookie;
977
978 ret = dma_async_is_complete(cookie, last_complete, last_used);
979 if (ret != DMA_SUCCESS) {
980 spin_lock_bh(&dc->lock);
981 txx9dmac_scan_descriptors(dc);
982 spin_unlock_bh(&dc->lock);
983
984 last_complete = dc->completed;
985 last_used = chan->cookie;
986
987 ret = dma_async_is_complete(cookie, last_complete, last_used);
988 }
989
990 if (done)
991 *done = last_complete;
992 if (used)
993 *used = last_used;
994
995 return ret;
996}
997
998static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
999 struct txx9dmac_desc *prev)
1000{
1001 struct txx9dmac_dev *ddev = dc->ddev;
1002 struct txx9dmac_desc *desc;
1003 LIST_HEAD(list);
1004
1005 prev = txx9dmac_last_child(prev);
1006 txx9dmac_dequeue(dc, &list);
1007 desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
1008 desc_write_CHAR(dc, prev, desc->txd.phys);
1009 dma_sync_single_for_device(chan2parent(&dc->chan),
1010 prev->txd.phys, ddev->descsize,
1011 DMA_TO_DEVICE);
1012 mmiowb();
1013 if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
1014 channel_read_CHAR(dc) == prev->txd.phys)
1015 /* Restart chain DMA */
1016 channel_write_CHAR(dc, desc->txd.phys);
1017 list_splice_tail(&list, &dc->active_list);
1018}
1019
1020static void txx9dmac_issue_pending(struct dma_chan *chan)
1021{
1022 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1023
1024 spin_lock_bh(&dc->lock);
1025
1026 if (!list_empty(&dc->active_list))
1027 txx9dmac_scan_descriptors(dc);
1028 if (!list_empty(&dc->queue)) {
1029 if (list_empty(&dc->active_list)) {
1030 txx9dmac_dequeue(dc, &dc->active_list);
1031 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
1032 } else if (txx9_dma_have_SMPCHN()) {
1033 struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
1034
1035 if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
1036 txx9dmac_chan_INTENT(dc))
1037 txx9dmac_chain_dynamic(dc, prev);
1038 }
1039 }
1040
1041 spin_unlock_bh(&dc->lock);
1042}
1043
1044static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1045{
1046 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1047 struct txx9dmac_slave *ds = chan->private;
1048 struct txx9dmac_desc *desc;
1049 int i;
1050
1051 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1052
1053 /* ASSERT: channel is idle */
1054 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
1055 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1056 return -EIO;
1057 }
1058
1059 dc->completed = chan->cookie = 1;
1060
1061 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1062 txx9dmac_chan_set_SMPCHN(dc);
1063 if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
1064 dc->ccr |= TXX9_DMA_CCR_INTENC;
1065 if (chan->device->device_prep_dma_memcpy) {
1066 if (ds)
1067 return -EINVAL;
1068 dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
1069 } else {
1070 if (!ds ||
1071 (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
1072 return -EINVAL;
1073 dc->ccr |= TXX9_DMA_CCR_EXTRQ |
1074 TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
1075 txx9dmac_chan_set_INTENT(dc);
1076 }
1077
1078 spin_lock_bh(&dc->lock);
1079 i = dc->descs_allocated;
1080 while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
1081 spin_unlock_bh(&dc->lock);
1082
1083 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
1084 if (!desc) {
1085 dev_info(chan2dev(chan),
1086 "only allocated %d descriptors\n", i);
1087 spin_lock_bh(&dc->lock);
1088 break;
1089 }
1090 txx9dmac_desc_put(dc, desc);
1091
1092 spin_lock_bh(&dc->lock);
1093 i = ++dc->descs_allocated;
1094 }
1095 spin_unlock_bh(&dc->lock);
1096
1097 dev_dbg(chan2dev(chan),
1098 "alloc_chan_resources allocated %d descriptors\n", i);
1099
1100 return i;
1101}
1102
1103static void txx9dmac_free_chan_resources(struct dma_chan *chan)
1104{
1105 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1106 struct txx9dmac_dev *ddev = dc->ddev;
1107 struct txx9dmac_desc *desc, *_desc;
1108 LIST_HEAD(list);
1109
1110 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
1111 dc->descs_allocated);
1112
1113 /* ASSERT: channel is idle */
1114 BUG_ON(!list_empty(&dc->active_list));
1115 BUG_ON(!list_empty(&dc->queue));
1116 BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
1117
1118 spin_lock_bh(&dc->lock);
1119 list_splice_init(&dc->free_list, &list);
1120 dc->descs_allocated = 0;
1121 spin_unlock_bh(&dc->lock);
1122
1123 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1124 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1125 dma_unmap_single(chan2parent(chan), desc->txd.phys,
1126 ddev->descsize, DMA_TO_DEVICE);
1127 kfree(desc);
1128 }
1129
1130 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
1131}
1132
1133/*----------------------------------------------------------------------*/
1134
1135static void txx9dmac_off(struct txx9dmac_dev *ddev)
1136{
1137 dma_writel(ddev, MCR, 0);
1138 mmiowb();
1139}
1140
1141static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1142{
1143 struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
1144 struct platform_device *dmac_dev = cpdata->dmac_dev;
1145 struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
1146 struct txx9dmac_chan *dc;
1147 int err;
1148 int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
1149 int irq;
1150
1151 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
1152 if (!dc)
1153 return -ENOMEM;
1154
1155 dc->dma.dev = &pdev->dev;
1156 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1157 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1158 dc->dma.device_terminate_all = txx9dmac_terminate_all;
1159 dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
1160 dc->dma.device_issue_pending = txx9dmac_issue_pending;
1161 if (pdata && pdata->memcpy_chan == ch) {
1162 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
1163 dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
1164 } else {
1165 dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
1166 dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
1167 dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
1168 }
1169
1170 INIT_LIST_HEAD(&dc->dma.channels);
1171 dc->ddev = platform_get_drvdata(dmac_dev);
1172 if (dc->ddev->irq < 0) {
1173 irq = platform_get_irq(pdev, 0);
1174 if (irq < 0)
1175 return irq;
1176 tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
1177 (unsigned long)dc);
1178 dc->irq = irq;
1179 err = devm_request_irq(&pdev->dev, dc->irq,
1180 txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
1181 if (err)
1182 return err;
1183 } else
1184 dc->irq = -1;
1185 dc->ddev->chan[ch] = dc;
1186 dc->chan.device = &dc->dma;
1187 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1188 dc->chan.cookie = dc->completed = 1;
1189
1190 if (is_dmac64(dc))
1191 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
1192 else
1193 dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
1194 spin_lock_init(&dc->lock);
1195
1196 INIT_LIST_HEAD(&dc->active_list);
1197 INIT_LIST_HEAD(&dc->queue);
1198 INIT_LIST_HEAD(&dc->free_list);
1199
1200 txx9dmac_reset_chan(dc);
1201
1202 platform_set_drvdata(pdev, dc);
1203
1204 err = dma_async_device_register(&dc->dma);
1205 if (err)
1206 return err;
1207 dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
1208 dc->dma.dev_id,
1209 dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
1210 dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
1211
1212 return 0;
1213}
1214
1215static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
1216{
1217 struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
1218
1219 dma_async_device_unregister(&dc->dma);
1220 if (dc->irq >= 0)
1221 tasklet_kill(&dc->tasklet);
1222 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
1223 return 0;
1224}
1225
1226static int __init txx9dmac_probe(struct platform_device *pdev)
1227{
1228 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1229 struct resource *io;
1230 struct txx9dmac_dev *ddev;
1231 u32 mcr;
1232 int err;
1233
1234 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1235 if (!io)
1236 return -EINVAL;
1237
1238 ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
1239 if (!ddev)
1240 return -ENOMEM;
1241
1242 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
1243 dev_name(&pdev->dev)))
1244 return -EBUSY;
1245
1246 ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
1247 if (!ddev->regs)
1248 return -ENOMEM;
1249 ddev->have_64bit_regs = pdata->have_64bit_regs;
1250 if (__is_dmac64(ddev))
1251 ddev->descsize = sizeof(struct txx9dmac_hwdesc);
1252 else
1253 ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
1254
1255 /* force dma off, just in case */
1256 txx9dmac_off(ddev);
1257
1258 ddev->irq = platform_get_irq(pdev, 0);
1259 if (ddev->irq >= 0) {
1260 tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
1261 (unsigned long)ddev);
1262 err = devm_request_irq(&pdev->dev, ddev->irq,
1263 txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
1264 if (err)
1265 return err;
1266 }
1267
1268 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1269 if (pdata && pdata->memcpy_chan >= 0)
1270 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1271 dma_writel(ddev, MCR, mcr);
1272
1273 platform_set_drvdata(pdev, ddev);
1274 return 0;
1275}
1276
1277static int __exit txx9dmac_remove(struct platform_device *pdev)
1278{
1279 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1280
1281 txx9dmac_off(ddev);
1282 if (ddev->irq >= 0)
1283 tasklet_kill(&ddev->tasklet);
1284 return 0;
1285}
1286
1287static void txx9dmac_shutdown(struct platform_device *pdev)
1288{
1289 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1290
1291 txx9dmac_off(ddev);
1292}
1293
1294static int txx9dmac_suspend_late(struct platform_device *pdev,
1295 pm_message_t mesg)
1296{
1297 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1298
1299 txx9dmac_off(ddev);
1300 return 0;
1301}
1302
1303static int txx9dmac_resume_early(struct platform_device *pdev)
1304{
1305 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1306 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1307 u32 mcr;
1308
1309 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1310 if (pdata && pdata->memcpy_chan >= 0)
1311 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1312 dma_writel(ddev, MCR, mcr);
1313 return 0;
1314
1315}
1316
1317static struct platform_driver txx9dmac_chan_driver = {
1318 .remove = __exit_p(txx9dmac_chan_remove),
1319 .driver = {
1320 .name = "txx9dmac-chan",
1321 },
1322};
1323
1324static struct platform_driver txx9dmac_driver = {
1325 .remove = __exit_p(txx9dmac_remove),
1326 .shutdown = txx9dmac_shutdown,
1327 .suspend_late = txx9dmac_suspend_late,
1328 .resume_early = txx9dmac_resume_early,
1329 .driver = {
1330 .name = "txx9dmac",
1331 },
1332};
1333
1334static int __init txx9dmac_init(void)
1335{
1336 int rc;
1337
1338 rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
1339 if (!rc) {
1340 rc = platform_driver_probe(&txx9dmac_chan_driver,
1341 txx9dmac_chan_probe);
1342 if (rc)
1343 platform_driver_unregister(&txx9dmac_driver);
1344 }
1345 return rc;
1346}
1347module_init(txx9dmac_init);
1348
1349static void __exit txx9dmac_exit(void)
1350{
1351 platform_driver_unregister(&txx9dmac_chan_driver);
1352 platform_driver_unregister(&txx9dmac_driver);
1353}
1354module_exit(txx9dmac_exit);
1355
1356MODULE_LICENSE("GPL");
1357MODULE_DESCRIPTION("TXx9 DMA Controller driver");
1358MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
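The dedicated channel that txx9dmac_chan_probe() registers with DMA_MEMCPY capability is reached through the generic dmaengine API rather than driver-specific calls. The fragment below is only a client-side sketch of that path (do_one_copy is an illustrative name, error handling is trimmed); it passes the same SKIP_*_UNMAP flags that dmatest uses, so the driver's own unmap logic is bypassed and the caller unmaps its buffers itself.

/* Sketch only: one memcpy through the dmaengine API of this era. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int do_one_copy(void *dst, void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_device *dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dst;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;
	dev = chan->device;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dst = dma_map_single(dev->dev, dst, len, DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, len,
			DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
			DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; a real client would use a callback. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();
out:
	dma_unmap_single(dev->dev, dma_dst, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return ret;
}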
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
new file mode 100644
index 000000000000..c907ff01d276
--- /dev/null
+++ b/drivers/dma/txx9dmac.h
@@ -0,0 +1,307 @@
1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef TXX9DMAC_H
11#define TXX9DMAC_H
12
13#include <linux/dmaengine.h>
14#include <asm/txx9/dmac.h>
15
16/*
17 * Design Notes:
18 *
19 * This DMAC has four channels and one FIFO buffer. Each channel can
20 * be configured for memory-memory or device-memory transfer, but only
21 * one channel can do alignment-free memory-memory transfer at a time,
22 * and that channel must occupy the FIFO buffer to transfer
23 * efficiently.
24 *
25 * Instead of dynamically assigning the FIFO buffer to channels, I chose
26 * to make one dedicated channel for memory-memory transfer. The
27 * dedicated channel is public. Other channels are private and used
28 * for slave transfer. Some devices in the SoC are wired to certain
29 * DMA channels.
30 */
31
32#ifdef CONFIG_MACH_TX49XX
33static inline bool txx9_dma_have_SMPCHN(void)
34{
35 return true;
36}
37#define TXX9_DMA_USE_SIMPLE_CHAIN
38#else
39static inline bool txx9_dma_have_SMPCHN(void)
40{
41 return false;
42}
43#endif
44
45#ifdef __LITTLE_ENDIAN
46#ifdef CONFIG_MACH_TX49XX
47#define CCR_LE TXX9_DMA_CCR_LE
48#define MCR_LE 0
49#else
50#define CCR_LE 0
51#define MCR_LE TXX9_DMA_MCR_LE
52#endif
53#else
54#define CCR_LE 0
55#define MCR_LE 0
56#endif
57
58/*
59 * Redefine this macro to handle differences between 32- and 64-bit
60 * addressing, big vs. little endian, etc.
61 */
62#ifdef __BIG_ENDIAN
63#define TXX9_DMA_REG32(name) u32 __pad_##name; u32 name
64#else
65#define TXX9_DMA_REG32(name) u32 name; u32 __pad_##name
66#endif
67
68/* Hardware register definitions. */
69struct txx9dmac_cregs {
70#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
71 TXX9_DMA_REG32(CHAR); /* Chain Address Register */
72#else
73 u64 CHAR; /* Chain Address Register */
74#endif
75 u64 SAR; /* Source Address Register */
76 u64 DAR; /* Destination Address Register */
77 TXX9_DMA_REG32(CNTR); /* Count Register */
78 TXX9_DMA_REG32(SAIR); /* Source Address Increment Register */
79 TXX9_DMA_REG32(DAIR); /* Destination Address Increment Register */
80 TXX9_DMA_REG32(CCR); /* Channel Control Register */
81 TXX9_DMA_REG32(CSR); /* Channel Status Register */
82};
83struct txx9dmac_cregs32 {
84 u32 CHAR;
85 u32 SAR;
86 u32 DAR;
87 u32 CNTR;
88 u32 SAIR;
89 u32 DAIR;
90 u32 CCR;
91 u32 CSR;
92};
93
94struct txx9dmac_regs {
95 /* per-channel registers */
96 struct txx9dmac_cregs CHAN[TXX9_DMA_MAX_NR_CHANNELS];
97 u64 __pad[9];
98 u64 MFDR; /* Memory Fill Data Register */
99 TXX9_DMA_REG32(MCR); /* Master Control Register */
100};
101struct txx9dmac_regs32 {
102 struct txx9dmac_cregs32 CHAN[TXX9_DMA_MAX_NR_CHANNELS];
103 u32 __pad[9];
104 u32 MFDR;
105 u32 MCR;
106};
107
108/* bits for MCR */
109#define TXX9_DMA_MCR_EIS(ch) (0x10000000<<(ch))
110#define TXX9_DMA_MCR_DIS(ch) (0x01000000<<(ch))
111#define TXX9_DMA_MCR_RSFIF 0x00000080
112#define TXX9_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
113#define TXX9_DMA_MCR_LE 0x00000004
114#define TXX9_DMA_MCR_RPRT 0x00000002
115#define TXX9_DMA_MCR_MSTEN 0x00000001
116
117/* bits for CCRn */
118#define TXX9_DMA_CCR_IMMCHN 0x20000000
119#define TXX9_DMA_CCR_USEXFSZ 0x10000000
120#define TXX9_DMA_CCR_LE 0x08000000
121#define TXX9_DMA_CCR_DBINH 0x04000000
122#define TXX9_DMA_CCR_SBINH 0x02000000
123#define TXX9_DMA_CCR_CHRST 0x01000000
124#define TXX9_DMA_CCR_RVBYTE 0x00800000
125#define TXX9_DMA_CCR_ACKPOL 0x00400000
126#define TXX9_DMA_CCR_REQPL 0x00200000
127#define TXX9_DMA_CCR_EGREQ 0x00100000
128#define TXX9_DMA_CCR_CHDN 0x00080000
129#define TXX9_DMA_CCR_DNCTL 0x00060000
130#define TXX9_DMA_CCR_EXTRQ 0x00010000
131#define TXX9_DMA_CCR_INTRQD 0x0000e000
132#define TXX9_DMA_CCR_INTENE 0x00001000
133#define TXX9_DMA_CCR_INTENC 0x00000800
134#define TXX9_DMA_CCR_INTENT 0x00000400
135#define TXX9_DMA_CCR_CHNEN 0x00000200
136#define TXX9_DMA_CCR_XFACT 0x00000100
137#define TXX9_DMA_CCR_SMPCHN 0x00000020
138#define TXX9_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c)
139#define TXX9_DMA_CCR_XFSZ_1 TXX9_DMA_CCR_XFSZ(0)
140#define TXX9_DMA_CCR_XFSZ_2 TXX9_DMA_CCR_XFSZ(1)
141#define TXX9_DMA_CCR_XFSZ_4 TXX9_DMA_CCR_XFSZ(2)
142#define TXX9_DMA_CCR_XFSZ_8 TXX9_DMA_CCR_XFSZ(3)
143#define TXX9_DMA_CCR_XFSZ_X4 TXX9_DMA_CCR_XFSZ(4)
144#define TXX9_DMA_CCR_XFSZ_X8 TXX9_DMA_CCR_XFSZ(5)
145#define TXX9_DMA_CCR_XFSZ_X16 TXX9_DMA_CCR_XFSZ(6)
146#define TXX9_DMA_CCR_XFSZ_X32 TXX9_DMA_CCR_XFSZ(7)
147#define TXX9_DMA_CCR_MEMIO 0x00000002
148#define TXX9_DMA_CCR_SNGAD 0x00000001
149
150/* bits for CSRn */
151#define TXX9_DMA_CSR_CHNEN 0x00000400
152#define TXX9_DMA_CSR_STLXFER 0x00000200
153#define TXX9_DMA_CSR_XFACT 0x00000100
154#define TXX9_DMA_CSR_ABCHC 0x00000080
155#define TXX9_DMA_CSR_NCHNC 0x00000040
156#define TXX9_DMA_CSR_NTRNFC 0x00000020
157#define TXX9_DMA_CSR_EXTDN 0x00000010
158#define TXX9_DMA_CSR_CFERR 0x00000008
159#define TXX9_DMA_CSR_CHERR 0x00000004
160#define TXX9_DMA_CSR_DESERR 0x00000002
161#define TXX9_DMA_CSR_SORERR 0x00000001
162
163struct txx9dmac_chan {
164 struct dma_chan chan;
165 struct dma_device dma;
166 struct txx9dmac_dev *ddev;
167 void __iomem *ch_regs;
168 struct tasklet_struct tasklet;
169 int irq;
170 u32 ccr;
171
172 spinlock_t lock;
173
174 /* these other elements are all protected by lock */
175 dma_cookie_t completed;
176 struct list_head active_list;
177 struct list_head queue;
178 struct list_head free_list;
179
180 unsigned int descs_allocated;
181};
182
183struct txx9dmac_dev {
184 void __iomem *regs;
185 struct tasklet_struct tasklet;
186 int irq;
187 struct txx9dmac_chan *chan[TXX9_DMA_MAX_NR_CHANNELS];
188 bool have_64bit_regs;
189 unsigned int descsize;
190};
191
192static inline bool __is_dmac64(const struct txx9dmac_dev *ddev)
193{
194 return ddev->have_64bit_regs;
195}
196
197static inline bool is_dmac64(const struct txx9dmac_chan *dc)
198{
199 return __is_dmac64(dc->ddev);
200}
201
202#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
203/* Hardware descriptor definition. (for simple-chain) */
204struct txx9dmac_hwdesc {
205#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
206 TXX9_DMA_REG32(CHAR);
207#else
208 u64 CHAR;
209#endif
210 u64 SAR;
211 u64 DAR;
212 TXX9_DMA_REG32(CNTR);
213};
214struct txx9dmac_hwdesc32 {
215 u32 CHAR;
216 u32 SAR;
217 u32 DAR;
218 u32 CNTR;
219};
220#else
221#define txx9dmac_hwdesc txx9dmac_cregs
222#define txx9dmac_hwdesc32 txx9dmac_cregs32
223#endif
224
225struct txx9dmac_desc {
226 /* FIRST values the hardware uses */
227 union {
228 struct txx9dmac_hwdesc hwdesc;
229 struct txx9dmac_hwdesc32 hwdesc32;
230 };
231
232 /* THEN values for driver housekeeping */
233 struct list_head desc_node ____cacheline_aligned;
234 struct dma_async_tx_descriptor txd;
235 size_t len;
236};
237
238#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
239
240static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
241{
242 return (dc->ccr & TXX9_DMA_CCR_INTENT) != 0;
243}
244
245static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
246{
247 dc->ccr |= TXX9_DMA_CCR_INTENT;
248}
249
250static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
251 struct txx9dmac_desc *desc)
252{
253}
254
255static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
256{
257 dc->ccr |= TXX9_DMA_CCR_SMPCHN;
258}
259
260static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
261 struct txx9dmac_desc *desc,
262 u32 sair, u32 dair, u32 ccr)
263{
264}
265
266#else /* TXX9_DMA_USE_SIMPLE_CHAIN */
267
268static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
269{
270 return true;
271}
272
273static void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
274{
275}
276
277static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
278 struct txx9dmac_desc *desc)
279{
280 if (__is_dmac64(ddev))
281 desc->hwdesc.CCR |= TXX9_DMA_CCR_INTENT;
282 else
283 desc->hwdesc32.CCR |= TXX9_DMA_CCR_INTENT;
284}
285
286static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
287{
288}
289
290static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
291 struct txx9dmac_desc *desc,
292 u32 sai, u32 dai, u32 ccr)
293{
294 if (__is_dmac64(ddev)) {
295 desc->hwdesc.SAIR = sai;
296 desc->hwdesc.DAIR = dai;
297 desc->hwdesc.CCR = ccr;
298 } else {
299 desc->hwdesc32.SAIR = sai;
300 desc->hwdesc32.DAIR = dai;
301 desc->hwdesc32.CCR = ccr;
302 }
303}
304
305#endif /* TXX9_DMA_USE_SIMPLE_CHAIN */
306
307#endif /* TXX9DMAC_H */
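The private channels are intended for slave transfers: a peripheral driver fills in the struct txx9dmac_slave that <asm/txx9/dmac.h> provides (its tx_reg/rx_reg/reg_width fields are what txx9dmac_prep_slave_sg() and txx9dmac_dostart() consult) and attaches it to a channel via chan->private. The following is only a rough client-side sketch under that assumption, with an illustrative FIFO address and a deliberately permissive filter:

/* Sketch only: claiming a private TXx9 channel for device-to-memory DMA. */
#include <linux/dmaengine.h>
#include <asm/txx9/dmac.h>

static struct txx9dmac_slave my_slave = {
	.rx_reg		= 0x1f0f0000,	/* illustrative device FIFO address */
	.reg_width	= 4,		/* 32-bit FIFO register */
};

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* A real filter would also check that this is the wired channel. */
	chan->private = param;
	return true;
}

static struct dma_chan *claim_rx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, my_filter, &my_slave);
}

Transfers would then be prepared with chan->device->device_prep_slave_sg(chan, sgl, nents, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT) and submitted and issued exactly as in the memcpy sketch above.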