Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/at_hdmac.c1215
-rw-r--r--drivers/dma/at_hdmac_regs.h354
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dmatest.c21
-rw-r--r--drivers/dma/dw_dmac.c50
-rw-r--r--drivers/dma/dw_dmac_regs.h1
-rw-r--r--drivers/dma/fsldma.c305
-rw-r--r--drivers/dma/fsldma.h5
-rw-r--r--drivers/dma/ioat/dma.c7
-rw-r--r--drivers/dma/ioat/dma.h3
-rw-r--r--drivers/dma/ioat/dma_v2.c5
-rw-r--r--drivers/dma/ioat/dma_v2.h3
-rw-r--r--drivers/dma/ioat/pci.c16
-rw-r--r--drivers/dma/iop-adma.c9
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/mv_xor.h4
-rw-r--r--drivers/dma/txx9dmac.c1356
-rw-r--r--drivers/dma/txx9dmac.h308
20 files changed, 3618 insertions, 74 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ddcd9793b25c..fe1f3717b1ff 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -50,6 +50,14 @@ config DW_DMAC
50 Support the Synopsys DesignWare AHB DMA controller. This 50 Support the Synopsys DesignWare AHB DMA controller. This
51 can be integrated in chips such as the Atmel AT32ap7000. 51 can be integrated in chips such as the Atmel AT32ap7000.
52 52
53config AT_HDMAC
54 tristate "Atmel AHB DMA support"
55 depends on ARCH_AT91SAM9RL
56 select DMA_ENGINE
57 help
58 Support the Atmel AHB DMA controller. This can be integrated in
59 chips such as the Atmel AT91SAM9RL.
60
53config FSL_DMA 61config FSL_DMA
54 tristate "Freescale Elo and Elo Plus DMA support" 62 tristate "Freescale Elo and Elo Plus DMA support"
55 depends on FSL_SOC 63 depends on FSL_SOC
@@ -85,6 +93,14 @@ config MX3_IPU_IRQS
85 To avoid bloating the irq_desc[] array we allocate a sufficient 93 To avoid bloating the irq_desc[] array we allocate a sufficient
86 number of IRQ slots and map them dynamically to specific sources. 94 number of IRQ slots and map them dynamically to specific sources.
87 95
96config TXX9_DMAC
97 tristate "Toshiba TXx9 SoC DMA support"
98 depends on MACH_TX49XX || MACH_TX39XX
99 select DMA_ENGINE
100 help
101 Support the TXx9 SoC internal DMA controller. This can be
102 integrated in chips such as the Toshiba TX4927/38/39.
103
88config DMA_ENGINE 104config DMA_ENGINE
89 bool 105 bool
90 106
@@ -104,7 +120,7 @@ config NET_DMA
104 120
105config ASYNC_TX_DMA 121config ASYNC_TX_DMA
106 bool "Async_tx: Offload support for the async_tx api" 122 bool "Async_tx: Offload support for the async_tx api"
107 depends on DMA_ENGINE 123 depends on DMA_ENGINE && !HIGHMEM64G
108 help 124 help
109 This allows the async_tx api to take advantage of offload engines for 125 This allows the async_tx api to take advantage of offload engines for
110 memcpy, memset, xor, and raid6 p+q operations. If your platform has 126 memcpy, memset, xor, and raid6 p+q operations. If your platform has
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a1cb2857bba6..8f115e93b4a1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
6obj-$(CONFIG_FSL_DMA) += fsldma.o 6obj-$(CONFIG_FSL_DMA) += fsldma.o
7obj-$(CONFIG_MV_XOR) += mv_xor.o 7obj-$(CONFIG_MV_XOR) += mv_xor.o
8obj-$(CONFIG_DW_DMAC) += dw_dmac.o 8obj-$(CONFIG_DW_DMAC) += dw_dmac.o
9obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
9obj-$(CONFIG_MX3_IPU) += ipu/ 10obj-$(CONFIG_MX3_IPU) += ipu/
11obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
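Both new objects are ordinary platform drivers, so nothing binds until a board or SoC setup file describes the controller. As a hedged illustration of that wiring for at_hdmac (the base address, IRQ number and channel count below are placeholders invented for this sketch, not values from the patch; struct at_dma_platform_data is assumed to come from <mach/at_hdmac.h>), the probe routine added below expects one memory resource, one IRQ, a "dma_clk" clock and platform data carrying nr_channels and cap_mask:

/* Hypothetical board-file sketch -- not taken from this patch. */
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <mach/at_hdmac.h>

static struct at_dma_platform_data at_hdmac_pdata = {
	.nr_channels	= 2,		/* placeholder; must not exceed AT_DMA_MAX_NR_CHANNELS */
};

static struct resource at_hdmac_resources[] = {
	{
		.start	= 0xffffe600,			/* placeholder register base */
		.end	= 0xffffe600 + 0x200 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 21,				/* placeholder IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at_hdmac_device = {
	.name		= "at_hdmac",	/* matches at_dma_driver.driver.name */
	.id		= -1,
	.dev		= {
		.platform_data	= &at_hdmac_pdata,
	},
	.resource	= at_hdmac_resources,
	.num_resources	= ARRAY_SIZE(at_hdmac_resources),
};

static void __init board_add_hdmac(void)
{
	/* advertise which prep routines the driver should register */
	dma_cap_set(DMA_MEMCPY, at_hdmac_pdata.cap_mask);
	dma_cap_set(DMA_SLAVE, at_hdmac_pdata.cap_mask);
	/* a clock lookup named "dma_clk" for this device is also needed */
	platform_device_register(&at_hdmac_device);
}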
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
new file mode 100644
index 000000000000..0aeb578a24e3
--- /dev/null
+++ b/drivers/dma/at_hdmac.c
@@ -0,0 +1,1215 @@
1/*
2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 *
12 * This supports the Atmel AHB DMA Controller.
13 *
14 * The driver has currently been tested with the Atmel AT91SAM9RL
15 * and AT91SAM9G45 series.
16 */
17
18#include <linux/clk.h>
19#include <linux/dmaengine.h>
20#include <linux/dma-mapping.h>
21#include <linux/dmapool.h>
22#include <linux/interrupt.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include "at_hdmac_regs.h"
27
28/*
29 * Glossary
30 * --------
31 *
32 * at_hdmac : Name of the Atmel AHB DMA Controller
33 * at_dma_ / atdma : Atmel DMA controller entity related
34 * atc_ / atchan : Atmel DMA Channel entity related
35 */
36
37#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
38#define ATC_DEFAULT_CTRLA (0)
39#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
40 |ATC_DIF(1))
41
42/*
43 * Initial number of descriptors to allocate for each channel. This could
44 * be increased during dma usage.
45 */
46static unsigned int init_nr_desc_per_channel = 64;
47module_param(init_nr_desc_per_channel, uint, 0644);
48MODULE_PARM_DESC(init_nr_desc_per_channel,
49 "initial descriptors per channel (default: 64)");
50
51
52/* prototypes */
53static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
54
55
56/*----------------------------------------------------------------------*/
57
58static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
59{
60 return list_first_entry(&atchan->active_list,
61 struct at_desc, desc_node);
62}
63
64static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
65{
66 return list_first_entry(&atchan->queue,
67 struct at_desc, desc_node);
68}
69
70/**
71 * atc_alloc_descriptor - allocate and return an initialized descriptor
72 * @chan: the channel to allocate descriptors for
73 * @gfp_flags: GFP allocation flags
74 *
75 * Note: The ack-bit is positioned in the descriptor flag at creation time
76 * to make initial allocation more convenient. This bit will be cleared
77 * and control will be given to client at usage time (during
78 * preparation functions).
79 */
80static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
81 gfp_t gfp_flags)
82{
83 struct at_desc *desc = NULL;
84 struct at_dma *atdma = to_at_dma(chan->device);
85 dma_addr_t phys;
86
87 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
88 if (desc) {
89 memset(desc, 0, sizeof(struct at_desc));
90 INIT_LIST_HEAD(&desc->tx_list);
91 dma_async_tx_descriptor_init(&desc->txd, chan);
92 /* txd.flags will be overwritten in prep functions */
93 desc->txd.flags = DMA_CTRL_ACK;
94 desc->txd.tx_submit = atc_tx_submit;
95 desc->txd.phys = phys;
96 }
97
98 return desc;
99}
100
101/**
102 * atc_desc_get - get an unused descriptor from free_list
103 * @atchan: channel we want a new descriptor for
104 */
105static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
106{
107 struct at_desc *desc, *_desc;
108 struct at_desc *ret = NULL;
109 unsigned int i = 0;
110 LIST_HEAD(tmp_list);
111
112 spin_lock_bh(&atchan->lock);
113 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
114 i++;
115 if (async_tx_test_ack(&desc->txd)) {
116 list_del(&desc->desc_node);
117 ret = desc;
118 break;
119 }
120 dev_dbg(chan2dev(&atchan->chan_common),
121 "desc %p not ACKed\n", desc);
122 }
123 spin_unlock_bh(&atchan->lock);
124 dev_vdbg(chan2dev(&atchan->chan_common),
125 "scanned %u descriptors on freelist\n", i);
126
127 /* no more descriptor available in initial pool: create one more */
128 if (!ret) {
129 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
130 if (ret) {
131 spin_lock_bh(&atchan->lock);
132 atchan->descs_allocated++;
133 spin_unlock_bh(&atchan->lock);
134 } else {
135 dev_err(chan2dev(&atchan->chan_common),
136 "not enough descriptors available\n");
137 }
138 }
139
140 return ret;
141}
142
143/**
144 * atc_desc_put - move a descriptor, including any children, to the free list
145 * @atchan: channel we work on
146 * @desc: descriptor, at the head of a chain, to move to free list
147 */
148static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
149{
150 if (desc) {
151 struct at_desc *child;
152
153 spin_lock_bh(&atchan->lock);
154 list_for_each_entry(child, &desc->tx_list, desc_node)
155 dev_vdbg(chan2dev(&atchan->chan_common),
156 "moving child desc %p to freelist\n",
157 child);
158 list_splice_init(&desc->tx_list, &atchan->free_list);
159 dev_vdbg(chan2dev(&atchan->chan_common),
160 "moving desc %p to freelist\n", desc);
161 list_add(&desc->desc_node, &atchan->free_list);
162 spin_unlock_bh(&atchan->lock);
163 }
164}
165
166/**
167 * atc_assign_cookie - compute and assign new cookie
168 * @atchan: channel we work on
169 * @desc: descriptor to assign cookie for
170 *
171 * Called with atchan->lock held and bh disabled
172 */
173static dma_cookie_t
174atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
175{
176 dma_cookie_t cookie = atchan->chan_common.cookie;
177
178 if (++cookie < 0)
179 cookie = 1;
180
181 atchan->chan_common.cookie = cookie;
182 desc->txd.cookie = cookie;
183
184 return cookie;
185}
186
187/**
188 * atc_dostart - starts the DMA engine for real
189 * @atchan: the channel we want to start
190 * @first: first descriptor in the list we want to begin with
191 *
192 * Called with atchan->lock held and bh disabled
193 */
194static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
195{
196 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
197
198 /* ASSERT: channel is idle */
199 if (atc_chan_is_enabled(atchan)) {
200 dev_err(chan2dev(&atchan->chan_common),
201 "BUG: Attempted to start non-idle channel\n");
202 dev_err(chan2dev(&atchan->chan_common),
203 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
204 channel_readl(atchan, SADDR),
205 channel_readl(atchan, DADDR),
206 channel_readl(atchan, CTRLA),
207 channel_readl(atchan, CTRLB),
208 channel_readl(atchan, DSCR));
209
210 /* The tasklet will hopefully advance the queue... */
211 return;
212 }
213
214 vdbg_dump_regs(atchan);
215
216 /* clear any pending interrupt */
217 while (dma_readl(atdma, EBCISR))
218 cpu_relax();
219
220 channel_writel(atchan, SADDR, 0);
221 channel_writel(atchan, DADDR, 0);
222 channel_writel(atchan, CTRLA, 0);
223 channel_writel(atchan, CTRLB, 0);
224 channel_writel(atchan, DSCR, first->txd.phys);
225 dma_writel(atdma, CHER, atchan->mask);
226
227 vdbg_dump_regs(atchan);
228}
229
230/**
231 * atc_chain_complete - finish work for one transaction chain
232 * @atchan: channel we work on
233 * @desc: descriptor at the head of the chain we want to complete
234 *
235 * Called with atchan->lock held and bh disabled */
236static void
237atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
238{
239 dma_async_tx_callback callback;
240 void *param;
241 struct dma_async_tx_descriptor *txd = &desc->txd;
242
243 dev_vdbg(chan2dev(&atchan->chan_common),
244 "descriptor %u complete\n", txd->cookie);
245
246 atchan->completed_cookie = txd->cookie;
247 callback = txd->callback;
248 param = txd->callback_param;
249
250 /* move children to free_list */
251 list_splice_init(&desc->tx_list, &atchan->free_list);
252 /* move myself to free_list */
253 list_move(&desc->desc_node, &atchan->free_list);
254
255 /* unmap dma addresses */
256 if (!atchan->chan_common.private) {
257 struct device *parent = chan2parent(&atchan->chan_common);
258 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
259 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
260 dma_unmap_single(parent,
261 desc->lli.daddr,
262 desc->len, DMA_FROM_DEVICE);
263 else
264 dma_unmap_page(parent,
265 desc->lli.daddr,
266 desc->len, DMA_FROM_DEVICE);
267 }
268 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
269 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
270 dma_unmap_single(parent,
271 desc->lli.saddr,
272 desc->len, DMA_TO_DEVICE);
273 else
274 dma_unmap_page(parent,
275 desc->lli.saddr,
276 desc->len, DMA_TO_DEVICE);
277 }
278 }
279
280 /*
281 * The API requires that no submissions are done from a
282 * callback, so we don't need to drop the lock here
283 */
284 if (callback)
285 callback(param);
286
287 dma_run_dependencies(txd);
288}
289
290/**
291 * atc_complete_all - finish work for all transactions
292 * @atchan: channel to complete transactions for
293 *
294 * Eventually submit queued descriptors if any
295 *
296 * Assume channel is idle while calling this function
297 * Called with atchan->lock held and bh disabled
298 */
299static void atc_complete_all(struct at_dma_chan *atchan)
300{
301 struct at_desc *desc, *_desc;
302 LIST_HEAD(list);
303
304 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
305
306 BUG_ON(atc_chan_is_enabled(atchan));
307
308 /*
309 * Submit queued descriptors ASAP, i.e. before we go through
310 * the completed ones.
311 */
312 if (!list_empty(&atchan->queue))
313 atc_dostart(atchan, atc_first_queued(atchan));
314 /* empty active_list now that it is completed */
315 list_splice_init(&atchan->active_list, &list);
316 /* empty queue list by moving descriptors (if any) to active_list */
317 list_splice_init(&atchan->queue, &atchan->active_list);
318
319 list_for_each_entry_safe(desc, _desc, &list, desc_node)
320 atc_chain_complete(atchan, desc);
321}
322
323/**
324 * atc_cleanup_descriptors - clean up finished descriptors in active_list
325 * @atchan: channel to be cleaned up
326 *
327 * Called with atchan->lock held and bh disabled
328 */
329static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
330{
331 struct at_desc *desc, *_desc;
332 struct at_desc *child;
333
334 dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
335
336 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
337 if (!(desc->lli.ctrla & ATC_DONE))
338 /* This one is currently in progress */
339 return;
340
341 list_for_each_entry(child, &desc->tx_list, desc_node)
342 if (!(child->lli.ctrla & ATC_DONE))
343 /* Currently in progress */
344 return;
345
346 /*
347 * No descriptors so far seem to be in progress, i.e.
348 * this chain must be done.
349 */
350 atc_chain_complete(atchan, desc);
351 }
352}
353
354/**
355 * atc_advance_work - at the end of a transaction, move forward
356 * @atchan: channel where the transaction ended
357 *
358 * Called with atchan->lock held and bh disabled
359 */
360static void atc_advance_work(struct at_dma_chan *atchan)
361{
362 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
363
364 if (list_empty(&atchan->active_list) ||
365 list_is_singular(&atchan->active_list)) {
366 atc_complete_all(atchan);
367 } else {
368 atc_chain_complete(atchan, atc_first_active(atchan));
369 /* advance work */
370 atc_dostart(atchan, atc_first_active(atchan));
371 }
372}
373
374
375/**
376 * atc_handle_error - handle errors reported by DMA controller
377 * @atchan: channel where error occurs
378 *
379 * Called with atchan->lock held and bh disabled
380 */
381static void atc_handle_error(struct at_dma_chan *atchan)
382{
383 struct at_desc *bad_desc;
384 struct at_desc *child;
385
386 /*
387 * The descriptor currently at the head of the active list is
388 * broken. Since we don't have any way to report errors, we'll
389 * just have to scream loudly and try to carry on.
390 */
391 bad_desc = atc_first_active(atchan);
392 list_del_init(&bad_desc->desc_node);
393
394 /* As we are stopped, take the opportunity to push queued descriptors
395 * into the active_list */
396 list_splice_init(&atchan->queue, atchan->active_list.prev);
397
398 /* Try to restart the controller */
399 if (!list_empty(&atchan->active_list))
400 atc_dostart(atchan, atc_first_active(atchan));
401
402 /*
403 * KERN_CRIT may seem harsh, but since this only happens
404 * when someone submits a bad physical address in a
405 * descriptor, we should consider ourselves lucky that the
406 * controller flagged an error instead of scribbling over
407 * random memory locations.
408 */
409 dev_crit(chan2dev(&atchan->chan_common),
410 "Bad descriptor submitted for DMA!\n");
411 dev_crit(chan2dev(&atchan->chan_common),
412 " cookie: %d\n", bad_desc->txd.cookie);
413 atc_dump_lli(atchan, &bad_desc->lli);
414 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
415 atc_dump_lli(atchan, &child->lli);
416
417 /* Pretend the descriptor completed successfully */
418 atc_chain_complete(atchan, bad_desc);
419}
420
421
422/*-- IRQ & Tasklet ---------------------------------------------------*/
423
424static void atc_tasklet(unsigned long data)
425{
426 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
427
428 /* Channel cannot be enabled here */
429 if (atc_chan_is_enabled(atchan)) {
430 dev_err(chan2dev(&atchan->chan_common),
431 "BUG: channel enabled in tasklet\n");
432 return;
433 }
434
435 spin_lock(&atchan->lock);
436 if (test_and_clear_bit(0, &atchan->error_status))
437 atc_handle_error(atchan);
438 else
439 atc_advance_work(atchan);
440
441 spin_unlock(&atchan->lock);
442}
443
444static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
445{
446 struct at_dma *atdma = (struct at_dma *)dev_id;
447 struct at_dma_chan *atchan;
448 int i;
449 u32 status, pending, imr;
450 int ret = IRQ_NONE;
451
452 do {
453 imr = dma_readl(atdma, EBCIMR);
454 status = dma_readl(atdma, EBCISR);
455 pending = status & imr;
456
457 if (!pending)
458 break;
459
460 dev_vdbg(atdma->dma_common.dev,
461 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
462 status, imr, pending);
463
464 for (i = 0; i < atdma->dma_common.chancnt; i++) {
465 atchan = &atdma->chan[i];
466 if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
467 if (pending & AT_DMA_ERR(i)) {
468 /* Disable channel on AHB error */
469 dma_writel(atdma, CHDR, atchan->mask);
470 /* Give information to tasklet */
471 set_bit(0, &atchan->error_status);
472 }
473 tasklet_schedule(&atchan->tasklet);
474 ret = IRQ_HANDLED;
475 }
476 }
477
478 } while (pending);
479
480 return ret;
481}
482
483
484/*-- DMA Engine API --------------------------------------------------*/
485
486/**
487 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
488 * @desc: descriptor at the head of the transaction chain
489 *
490 * Queue the chain if the DMA engine is already working
491 *
492 * Cookie increment and adding to active_list or queue must be atomic
493 */
494static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
495{
496 struct at_desc *desc = txd_to_at_desc(tx);
497 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
498 dma_cookie_t cookie;
499
500 spin_lock_bh(&atchan->lock);
501 cookie = atc_assign_cookie(atchan, desc);
502
503 if (list_empty(&atchan->active_list)) {
504 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
505 desc->txd.cookie);
506 atc_dostart(atchan, desc);
507 list_add_tail(&desc->desc_node, &atchan->active_list);
508 } else {
509 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
510 desc->txd.cookie);
511 list_add_tail(&desc->desc_node, &atchan->queue);
512 }
513
514 spin_unlock_bh(&atchan->lock);
515
516 return cookie;
517}
518
519/**
520 * atc_prep_dma_memcpy - prepare a memcpy operation
521 * @chan: the channel to prepare operation on
522 * @dest: operation virtual destination address
523 * @src: operation virtual source address
524 * @len: operation length
525 * @flags: tx descriptor status flags
526 */
527static struct dma_async_tx_descriptor *
528atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
529 size_t len, unsigned long flags)
530{
531 struct at_dma_chan *atchan = to_at_dma_chan(chan);
532 struct at_desc *desc = NULL;
533 struct at_desc *first = NULL;
534 struct at_desc *prev = NULL;
535 size_t xfer_count;
536 size_t offset;
537 unsigned int src_width;
538 unsigned int dst_width;
539 u32 ctrla;
540 u32 ctrlb;
541
542 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
543 dest, src, len, flags);
544
545 if (unlikely(!len)) {
546 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
547 return NULL;
548 }
549
550 ctrla = ATC_DEFAULT_CTRLA;
551 ctrlb = ATC_DEFAULT_CTRLB
552 | ATC_SRC_ADDR_MODE_INCR
553 | ATC_DST_ADDR_MODE_INCR
554 | ATC_FC_MEM2MEM;
555
556 /*
557 * We can be a lot more clever here, but this should take care
558 * of the most common optimization.
559 */
560 if (!((src | dest | len) & 3)) {
561 ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
562 src_width = dst_width = 2;
563 } else if (!((src | dest | len) & 1)) {
564 ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
565 src_width = dst_width = 1;
566 } else {
567 ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
568 src_width = dst_width = 0;
569 }
570
571 for (offset = 0; offset < len; offset += xfer_count << src_width) {
572 xfer_count = min_t(size_t, (len - offset) >> src_width,
573 ATC_BTSIZE_MAX);
574
575 desc = atc_desc_get(atchan);
576 if (!desc)
577 goto err_desc_get;
578
579 desc->lli.saddr = src + offset;
580 desc->lli.daddr = dest + offset;
581 desc->lli.ctrla = ctrla | xfer_count;
582 desc->lli.ctrlb = ctrlb;
583
584 desc->txd.cookie = 0;
585 async_tx_ack(&desc->txd);
586
587 if (!first) {
588 first = desc;
589 } else {
590 /* inform the HW lli about chaining */
591 prev->lli.dscr = desc->txd.phys;
592 /* insert the link descriptor to the LD ring */
593 list_add_tail(&desc->desc_node,
594 &first->tx_list);
595 }
596 prev = desc;
597 }
598
599 /* First descriptor of the chain embeds additional information */
600 first->txd.cookie = -EBUSY;
601 first->len = len;
602
603 /* set end-of-link to the last link descriptor of the list */
604 set_desc_eol(desc);
605
606 desc->txd.flags = flags; /* client is in control of this ack */
607
608 return &first->txd;
609
610err_desc_get:
611 atc_desc_put(atchan, first);
612 return NULL;
613}
614
615
616/**
617 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
618 * @chan: DMA channel
619 * @sgl: scatterlist to transfer to/from
620 * @sg_len: number of entries in @scatterlist
621 * @direction: DMA direction
622 * @flags: tx descriptor status flags
623 */
624static struct dma_async_tx_descriptor *
625atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
626 unsigned int sg_len, enum dma_data_direction direction,
627 unsigned long flags)
628{
629 struct at_dma_chan *atchan = to_at_dma_chan(chan);
630 struct at_dma_slave *atslave = chan->private;
631 struct at_desc *first = NULL;
632 struct at_desc *prev = NULL;
633 u32 ctrla;
634 u32 ctrlb;
635 dma_addr_t reg;
636 unsigned int reg_width;
637 unsigned int mem_width;
638 unsigned int i;
639 struct scatterlist *sg;
640 size_t total_len = 0;
641
642 dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
643 direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
644 flags);
645
646 if (unlikely(!atslave || !sg_len)) {
647 dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n");
648 return NULL;
649 }
650
651 reg_width = atslave->reg_width;
652
653 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
654 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
655
656 switch (direction) {
657 case DMA_TO_DEVICE:
658 ctrla |= ATC_DST_WIDTH(reg_width);
659 ctrlb |= ATC_DST_ADDR_MODE_FIXED
660 | ATC_SRC_ADDR_MODE_INCR
661 | ATC_FC_MEM2PER;
662 reg = atslave->tx_reg;
663 for_each_sg(sgl, sg, sg_len, i) {
664 struct at_desc *desc;
665 u32 len;
666 u32 mem;
667
668 desc = atc_desc_get(atchan);
669 if (!desc)
670 goto err_desc_get;
671
672 mem = sg_phys(sg);
673 len = sg_dma_len(sg);
674 mem_width = 2;
675 if (unlikely(mem & 3 || len & 3))
676 mem_width = 0;
677
678 desc->lli.saddr = mem;
679 desc->lli.daddr = reg;
680 desc->lli.ctrla = ctrla
681 | ATC_SRC_WIDTH(mem_width)
682 | len >> mem_width;
683 desc->lli.ctrlb = ctrlb;
684
685 if (!first) {
686 first = desc;
687 } else {
688 /* inform the HW lli about chaining */
689 prev->lli.dscr = desc->txd.phys;
690 /* insert the link descriptor to the LD ring */
691 list_add_tail(&desc->desc_node,
692 &first->tx_list);
693 }
694 prev = desc;
695 total_len += len;
696 }
697 break;
698 case DMA_FROM_DEVICE:
699 ctrla |= ATC_SRC_WIDTH(reg_width);
700 ctrlb |= ATC_DST_ADDR_MODE_INCR
701 | ATC_SRC_ADDR_MODE_FIXED
702 | ATC_FC_PER2MEM;
703
704 reg = atslave->rx_reg;
705 for_each_sg(sgl, sg, sg_len, i) {
706 struct at_desc *desc;
707 u32 len;
708 u32 mem;
709
710 desc = atc_desc_get(atchan);
711 if (!desc)
712 goto err_desc_get;
713
714 mem = sg_phys(sg);
715 len = sg_dma_len(sg);
716 mem_width = 2;
717 if (unlikely(mem & 3 || len & 3))
718 mem_width = 0;
719
720 desc->lli.saddr = reg;
721 desc->lli.daddr = mem;
722 desc->lli.ctrla = ctrla
723 | ATC_DST_WIDTH(mem_width)
724 | len >> mem_width;
725 desc->lli.ctrlb = ctrlb;
726
727 if (!first) {
728 first = desc;
729 } else {
730 /* inform the HW lli about chaining */
731 prev->lli.dscr = desc->txd.phys;
732 /* insert the link descriptor to the LD ring */
733 list_add_tail(&desc->desc_node,
734 &first->tx_list);
735 }
736 prev = desc;
737 total_len += len;
738 }
739 break;
740 default:
741 return NULL;
742 }
743
744 /* set end-of-link to the last link descriptor of the list */
745 set_desc_eol(prev);
746
747 /* First descriptor of the chain embeds additional information */
748 first->txd.cookie = -EBUSY;
749 first->len = total_len;
750
751 /* the last link descriptor of the list is responsible for the flags */
752 prev->txd.flags = flags; /* client is in control of this ack */
753
754 return &first->txd;
755
756err_desc_get:
757 dev_err(chan2dev(chan), "not enough descriptors available\n");
758 atc_desc_put(atchan, first);
759 return NULL;
760}
761
762static void atc_terminate_all(struct dma_chan *chan)
763{
764 struct at_dma_chan *atchan = to_at_dma_chan(chan);
765 struct at_dma *atdma = to_at_dma(chan->device);
766 struct at_desc *desc, *_desc;
767 LIST_HEAD(list);
768
769 /*
770 * This is only called when something went wrong elsewhere, so
771 * we don't really care about the data. Just disable the
772 * channel. We still have to poll the channel enable bit due
773 * to AHB/HSB limitations.
774 */
775 spin_lock_bh(&atchan->lock);
776
777 dma_writel(atdma, CHDR, atchan->mask);
778
779 /* confirm that this channel is disabled */
780 while (dma_readl(atdma, CHSR) & atchan->mask)
781 cpu_relax();
782
783 /* active_list entries will end up before queued entries */
784 list_splice_init(&atchan->queue, &list);
785 list_splice_init(&atchan->active_list, &list);
786
787 spin_unlock_bh(&atchan->lock);
788
789 /* Flush all pending and queued descriptors */
790 list_for_each_entry_safe(desc, _desc, &list, desc_node)
791 atc_chain_complete(atchan, desc);
792}
793
794/**
795 * atc_is_tx_complete - poll for transaction completion
796 * @chan: DMA channel
797 * @cookie: transaction identifier to check status of
798 * @done: if not %NULL, updated with last completed transaction
799 * @used: if not %NULL, updated with last used transaction
800 *
801 * If @done and @used are passed in, upon return they reflect the driver
802 * internal state and can be used with dma_async_is_complete() to check
803 * the status of multiple cookies without re-checking hardware state.
804 */
805static enum dma_status
806atc_is_tx_complete(struct dma_chan *chan,
807 dma_cookie_t cookie,
808 dma_cookie_t *done, dma_cookie_t *used)
809{
810 struct at_dma_chan *atchan = to_at_dma_chan(chan);
811 dma_cookie_t last_used;
812 dma_cookie_t last_complete;
813 enum dma_status ret;
814
815 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
816 cookie, done ? *done : 0, used ? *used : 0);
817
818 spin_lock_bh(&atchan->lock);
819
820 last_complete = atchan->completed_cookie;
821 last_used = chan->cookie;
822
823 ret = dma_async_is_complete(cookie, last_complete, last_used);
824 if (ret != DMA_SUCCESS) {
825 atc_cleanup_descriptors(atchan);
826
827 last_complete = atchan->completed_cookie;
828 last_used = chan->cookie;
829
830 ret = dma_async_is_complete(cookie, last_complete, last_used);
831 }
832
833 spin_unlock_bh(&atchan->lock);
834
835 if (done)
836 *done = last_complete;
837 if (used)
838 *used = last_used;
839
840 return ret;
841}
842
843/**
844 * atc_issue_pending - try to finish work
845 * @chan: target DMA channel
846 */
847static void atc_issue_pending(struct dma_chan *chan)
848{
849 struct at_dma_chan *atchan = to_at_dma_chan(chan);
850
851 dev_vdbg(chan2dev(chan), "issue_pending\n");
852
853 if (!atc_chan_is_enabled(atchan)) {
854 spin_lock_bh(&atchan->lock);
855 atc_advance_work(atchan);
856 spin_unlock_bh(&atchan->lock);
857 }
858}
859
860/**
861 * atc_alloc_chan_resources - allocate resources for DMA channel
862 * @chan: allocate descriptor resources for this channel
864 *
865 * return - the number of allocated descriptors
866 */
867static int atc_alloc_chan_resources(struct dma_chan *chan)
868{
869 struct at_dma_chan *atchan = to_at_dma_chan(chan);
870 struct at_dma *atdma = to_at_dma(chan->device);
871 struct at_desc *desc;
872 struct at_dma_slave *atslave;
873 int i;
874 u32 cfg;
875 LIST_HEAD(tmp_list);
876
877 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
878
879 /* ASSERT: channel is idle */
880 if (atc_chan_is_enabled(atchan)) {
881 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
882 return -EIO;
883 }
884
885 cfg = ATC_DEFAULT_CFG;
886
887 atslave = chan->private;
888 if (atslave) {
889 /*
890 * We need controller-specific data to set up slave
891 * transfers.
892 */
893 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
894
895 /* if a cfg configuration is specified, take it instead of the default */
896 if (atslave->cfg)
897 cfg = atslave->cfg;
898 }
899
900 /* have we already been set up?
901 * reconfigure channel but no need to reallocate descriptors */
902 if (!list_empty(&atchan->free_list))
903 return atchan->descs_allocated;
904
905 /* Allocate initial pool of descriptors */
906 for (i = 0; i < init_nr_desc_per_channel; i++) {
907 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
908 if (!desc) {
909 dev_err(atdma->dma_common.dev,
910 "Only %d initial descriptors\n", i);
911 break;
912 }
913 list_add_tail(&desc->desc_node, &tmp_list);
914 }
915
916 spin_lock_bh(&atchan->lock);
917 atchan->descs_allocated = i;
918 list_splice(&tmp_list, &atchan->free_list);
919 atchan->completed_cookie = chan->cookie = 1;
920 spin_unlock_bh(&atchan->lock);
921
922 /* channel parameters */
923 channel_writel(atchan, CFG, cfg);
924
925 dev_dbg(chan2dev(chan),
926 "alloc_chan_resources: allocated %d descriptors\n",
927 atchan->descs_allocated);
928
929 return atchan->descs_allocated;
930}
931
932/**
933 * atc_free_chan_resources - free all channel resources
934 * @chan: DMA channel
935 */
936static void atc_free_chan_resources(struct dma_chan *chan)
937{
938 struct at_dma_chan *atchan = to_at_dma_chan(chan);
939 struct at_dma *atdma = to_at_dma(chan->device);
940 struct at_desc *desc, *_desc;
941 LIST_HEAD(list);
942
943 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
944 atchan->descs_allocated);
945
946 /* ASSERT: channel is idle */
947 BUG_ON(!list_empty(&atchan->active_list));
948 BUG_ON(!list_empty(&atchan->queue));
949 BUG_ON(atc_chan_is_enabled(atchan));
950
951 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
952 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
953 list_del(&desc->desc_node);
954 /* free link descriptor */
955 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
956 }
957 list_splice_init(&atchan->free_list, &list);
958 atchan->descs_allocated = 0;
959
960 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
961}
962
963
964/*-- Module Management -----------------------------------------------*/
965
966/**
967 * at_dma_off - disable DMA controller
968 * @atdma: the Atmel HDMAC device
969 */
970static void at_dma_off(struct at_dma *atdma)
971{
972 dma_writel(atdma, EN, 0);
973
974 /* disable all interrupts */
975 dma_writel(atdma, EBCIDR, -1L);
976
977 /* confirm that all channels are disabled */
978 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
979 cpu_relax();
980}
981
982static int __init at_dma_probe(struct platform_device *pdev)
983{
984 struct at_dma_platform_data *pdata;
985 struct resource *io;
986 struct at_dma *atdma;
987 size_t size;
988 int irq;
989 int err;
990 int i;
991
992 /* get DMA Controller parameters from platform */
993 pdata = pdev->dev.platform_data;
994 if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
995 return -EINVAL;
996
997 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
998 if (!io)
999 return -EINVAL;
1000
1001 irq = platform_get_irq(pdev, 0);
1002 if (irq < 0)
1003 return irq;
1004
1005 size = sizeof(struct at_dma);
1006 size += pdata->nr_channels * sizeof(struct at_dma_chan);
1007 atdma = kzalloc(size, GFP_KERNEL);
1008 if (!atdma)
1009 return -ENOMEM;
1010
1011 /* discover transaction capabilities from the platform data */
1012 atdma->dma_common.cap_mask = pdata->cap_mask;
1013 atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
1014
1015 size = io->end - io->start + 1;
1016 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1017 err = -EBUSY;
1018 goto err_kfree;
1019 }
1020
1021 atdma->regs = ioremap(io->start, size);
1022 if (!atdma->regs) {
1023 err = -ENOMEM;
1024 goto err_release_r;
1025 }
1026
1027 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1028 if (IS_ERR(atdma->clk)) {
1029 err = PTR_ERR(atdma->clk);
1030 goto err_clk;
1031 }
1032 clk_enable(atdma->clk);
1033
1034 /* force dma off, just in case */
1035 at_dma_off(atdma);
1036
1037 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1038 if (err)
1039 goto err_irq;
1040
1041 platform_set_drvdata(pdev, atdma);
1042
1043 /* create a pool of consistent memory blocks for hardware descriptors */
1044 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1045 &pdev->dev, sizeof(struct at_desc),
1046 4 /* word alignment */, 0);
1047 if (!atdma->dma_desc_pool) {
1048 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1049 err = -ENOMEM;
1050 goto err_pool_create;
1051 }
1052
1053 /* clear any pending interrupt */
1054 while (dma_readl(atdma, EBCISR))
1055 cpu_relax();
1056
1057 /* initialize channel-related values */
1058 INIT_LIST_HEAD(&atdma->dma_common.channels);
1059 for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
1060 struct at_dma_chan *atchan = &atdma->chan[i];
1061
1062 atchan->chan_common.device = &atdma->dma_common;
1063 atchan->chan_common.cookie = atchan->completed_cookie = 1;
1064 atchan->chan_common.chan_id = i;
1065 list_add_tail(&atchan->chan_common.device_node,
1066 &atdma->dma_common.channels);
1067
1068 atchan->ch_regs = atdma->regs + ch_regs(i);
1069 spin_lock_init(&atchan->lock);
1070 atchan->mask = 1 << i;
1071
1072 INIT_LIST_HEAD(&atchan->active_list);
1073 INIT_LIST_HEAD(&atchan->queue);
1074 INIT_LIST_HEAD(&atchan->free_list);
1075
1076 tasklet_init(&atchan->tasklet, atc_tasklet,
1077 (unsigned long)atchan);
1078 atc_enable_irq(atchan);
1079 }
1080
1081 /* set base routines */
1082 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1083 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1084 atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
1085 atdma->dma_common.device_issue_pending = atc_issue_pending;
1086 atdma->dma_common.dev = &pdev->dev;
1087
1088 /* set prep routines based on capability */
1089 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1090 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1091
1092 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1093 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1094 atdma->dma_common.device_terminate_all = atc_terminate_all;
1095 }
1096
1097 dma_writel(atdma, EN, AT_DMA_ENABLE);
1098
1099 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1100 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1101 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1102 atdma->dma_common.chancnt);
1103
1104 dma_async_device_register(&atdma->dma_common);
1105
1106 return 0;
1107
1108err_pool_create:
1109 platform_set_drvdata(pdev, NULL);
1110 free_irq(platform_get_irq(pdev, 0), atdma);
1111err_irq:
1112 clk_disable(atdma->clk);
1113 clk_put(atdma->clk);
1114err_clk:
1115 iounmap(atdma->regs);
1116 atdma->regs = NULL;
1117err_release_r:
1118 release_mem_region(io->start, size);
1119err_kfree:
1120 kfree(atdma);
1121 return err;
1122}
1123
1124static int __exit at_dma_remove(struct platform_device *pdev)
1125{
1126 struct at_dma *atdma = platform_get_drvdata(pdev);
1127 struct dma_chan *chan, *_chan;
1128 struct resource *io;
1129
1130 at_dma_off(atdma);
1131 dma_async_device_unregister(&atdma->dma_common);
1132
1133 dma_pool_destroy(atdma->dma_desc_pool);
1134 platform_set_drvdata(pdev, NULL);
1135 free_irq(platform_get_irq(pdev, 0), atdma);
1136
1137 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1138 device_node) {
1139 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1140
1141 /* Disable interrupts */
1142 atc_disable_irq(atchan);
1143 tasklet_disable(&atchan->tasklet);
1144
1145 tasklet_kill(&atchan->tasklet);
1146 list_del(&chan->device_node);
1147 }
1148
1149 clk_disable(atdma->clk);
1150 clk_put(atdma->clk);
1151
1152 iounmap(atdma->regs);
1153 atdma->regs = NULL;
1154
1155 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1156 release_mem_region(io->start, io->end - io->start + 1);
1157
1158 kfree(atdma);
1159
1160 return 0;
1161}
1162
1163static void at_dma_shutdown(struct platform_device *pdev)
1164{
1165 struct at_dma *atdma = platform_get_drvdata(pdev);
1166
1167 at_dma_off(platform_get_drvdata(pdev));
1168 clk_disable(atdma->clk);
1169}
1170
1171static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1172{
1173 struct at_dma *atdma = platform_get_drvdata(pdev);
1174
1175 at_dma_off(platform_get_drvdata(pdev));
1176 clk_disable(atdma->clk);
1177 return 0;
1178}
1179
1180static int at_dma_resume_early(struct platform_device *pdev)
1181{
1182 struct at_dma *atdma = platform_get_drvdata(pdev);
1183
1184 clk_enable(atdma->clk);
1185 dma_writel(atdma, EN, AT_DMA_ENABLE);
1186 return 0;
1187
1188}
1189
1190static struct platform_driver at_dma_driver = {
1191 .remove = __exit_p(at_dma_remove),
1192 .shutdown = at_dma_shutdown,
1193 .suspend_late = at_dma_suspend_late,
1194 .resume_early = at_dma_resume_early,
1195 .driver = {
1196 .name = "at_hdmac",
1197 },
1198};
1199
1200static int __init at_dma_init(void)
1201{
1202 return platform_driver_probe(&at_dma_driver, at_dma_probe);
1203}
1204module_init(at_dma_init);
1205
1206static void __exit at_dma_exit(void)
1207{
1208 platform_driver_unregister(&at_dma_driver);
1209}
1210module_exit(at_dma_exit);
1211
1212MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1213MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1214MODULE_LICENSE("GPL");
1215MODULE_ALIAS("platform:at_hdmac");
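For reference, this is roughly how a peripheral driver of this era would consume the DMA_SLAVE support implemented above. It is an illustrative sketch only: struct at_dma_slave is assumed to be provided by platform code via <mach/at_hdmac.h>, and my_filter()/my_start_tx() and their parameters are invented names, not part of this patch.

/* Hypothetical slave-client sketch -- not taken from this patch. */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <mach/at_hdmac.h>

static bool my_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	/* only accept channels of the controller named in the slave data */
	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;	/* read back by atc_prep_slave_sg() */
		return true;
	}
	return false;
}

static int my_start_tx(struct at_dma_slave *atslave,
		       struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, atslave);
	if (!chan)
		return -ENODEV;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->tx_submit(desc);		/* lands in atc_tx_submit() above */
	dma_async_issue_pending(chan);	/* lands in atc_issue_pending() above */
	return 0;
}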
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
new file mode 100644
index 000000000000..495457e3dc4b
--- /dev/null
+++ b/drivers/dma/at_hdmac_regs.h
@@ -0,0 +1,354 @@
1/*
2 * Header file for the Atmel AHB DMA Controller driver
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef AT_HDMAC_REGS_H
12#define AT_HDMAC_REGS_H
13
14#include <mach/at_hdmac.h>
15
16#define AT_DMA_MAX_NR_CHANNELS 8
17
18
19#define AT_DMA_GCFG 0x00 /* Global Configuration Register */
20#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */
21#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. */
22#define AT_DMA_ARB_CFG_FIXED (0x0 << 4)
23#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4)
24
25#define AT_DMA_EN 0x04 /* Controller Enable Register */
26#define AT_DMA_ENABLE (0x1 << 0)
27
28#define AT_DMA_SREQ 0x08 /* Software Single Request Register */
29#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */
30#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */
31
32#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */
33#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */
34#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */
35
36#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */
37#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */
38#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */
39
40#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */
41#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */
42
43/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
44#define AT_DMA_EBCIER 0x18 /* Enable register */
45#define AT_DMA_EBCIDR 0x1C /* Disable register */
46#define AT_DMA_EBCIMR 0x20 /* Mask Register */
47#define AT_DMA_EBCISR 0x24 /* Status Register */
48#define AT_DMA_CBTC_OFFSET 8
49#define AT_DMA_ERR_OFFSET 16
50#define AT_DMA_BTC(x) (0x1 << (x))
51#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x)))
52#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x)))
53
54#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */
55#define AT_DMA_ENA(x) (0x1 << (x))
56#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x)))
57#define AT_DMA_KEEP(x) (0x1 << (24 + (x)))
58
59#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */
60#define AT_DMA_DIS(x) (0x1 << (x))
61#define AT_DMA_RES(x) (0x1 << ( 8 + (x)))
62
63#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */
64#define AT_DMA_EMPT(x) (0x1 << (16 + (x)))
65#define AT_DMA_STAL(x) (0x1 << (24 + (x)))
66
67
68#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */
69#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */
70
71/* Hardware register offset for each channel */
72#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */
73#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */
74#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */
75#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */
76#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */
77#define ATC_CFG_OFFSET 0x14 /* Configuration Register */
78#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */
79#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */
80
81
82/* Bitfield definitions */
83
84/* Bitfields in DSCR */
85#define ATC_DSCR_IF(i) (0x3 & (i)) /* Dsc fetched via AHB-Lite Interface i */
86
87/* Bitfields in CTRLA */
88#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
89#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
90/* Chunk Transfer size definitions are in at_hdmac.h */
91#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
92#define ATC_SRC_WIDTH(x) ((x) << 24)
93#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
94#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
95#define ATC_SRC_WIDTH_WORD (0x2 << 24)
96#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
97#define ATC_DST_WIDTH(x) ((x) << 28)
98#define ATC_DST_WIDTH_BYTE (0x0 << 28)
99#define ATC_DST_WIDTH_HALFWORD (0x1 << 28)
100#define ATC_DST_WIDTH_WORD (0x2 << 28)
101#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */
102
103/* Bitfields in CTRLB */
104#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
105#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
106#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
107#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
108#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
109#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */
110#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */
111#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */
112#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */
113#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */
114#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */
115#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */
116#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */
117#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */
118#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */
119#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24)
120#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */
121#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */
122#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */
123#define ATC_DST_ADDR_MODE_MASK (0x3 << 28)
124#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */
125#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */
126#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */
127#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */
128#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */
129
130/* Bitfields in CFG */
131/* are in at_hdmac.h */
132
133/* Bitfields in SPIP */
134#define ATC_SPIP_HOLE(x) (0xFFFFU & (x))
135#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)
136
137/* Bitfields in DPIP */
138#define ATC_DPIP_HOLE(x) (0xFFFFU & (x))
139#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)
140
141
142/*-- descriptors -----------------------------------------------------*/
143
144/* LLI == Linked List Item; aka DMA buffer descriptor */
145struct at_lli {
146 /* values that are not changed by hardware */
147 dma_addr_t saddr;
148 dma_addr_t daddr;
149 /* value that may get written back: */
150 u32 ctrla;
151 /* more values that are not changed by hardware */
152 u32 ctrlb;
153 dma_addr_t dscr; /* chain to next lli */
154};
155
156/**
157 * struct at_desc - software descriptor
158 * @lli: hardware lli structure
159 * @txd: support for the async_tx api
160 * @desc_node: node on the channel's descriptors list
161 * @len: total transaction bytecount
162 */
163struct at_desc {
164 /* FIRST values the hardware uses */
165 struct at_lli lli;
166
167 /* THEN values for driver housekeeping */
168 struct list_head tx_list;
169 struct dma_async_tx_descriptor txd;
170 struct list_head desc_node;
171 size_t len;
172};
173
174static inline struct at_desc *
175txd_to_at_desc(struct dma_async_tx_descriptor *txd)
176{
177 return container_of(txd, struct at_desc, txd);
178}
179
180
181/*-- Channels --------------------------------------------------------*/
182
183/**
184 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
185 * @chan_common: common dmaengine channel object members
186 * @device: parent device
187 * @ch_regs: memory mapped register base
188 * @mask: channel index in a mask
189 * @error_status: transmit error status information from irq handler
190 * to tasklet (use atomic operations)
191 * @tasklet: bottom half to finish transaction work
192 * @lock: serializes enqueue/dequeue operations to descriptors lists
193 * @completed_cookie: identifier for the most recently completed operation
194 * @active_list: list of descriptors the dmaengine is running on
195 * @queue: list of descriptors ready to be submitted to engine
196 * @free_list: list of descriptors usable by the channel
197 * @descs_allocated: records the actual size of the descriptor pool
198 */
199struct at_dma_chan {
200 struct dma_chan chan_common;
201 struct at_dma *device;
202 void __iomem *ch_regs;
203 u8 mask;
204 unsigned long error_status;
205 struct tasklet_struct tasklet;
206
207 spinlock_t lock;
208
209 /* these other elements are all protected by lock */
210 dma_cookie_t completed_cookie;
211 struct list_head active_list;
212 struct list_head queue;
213 struct list_head free_list;
214 unsigned int descs_allocated;
215};
216
217#define channel_readl(atchan, name) \
218 __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)
219
220#define channel_writel(atchan, name, val) \
221 __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
222
223static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
224{
225 return container_of(dchan, struct at_dma_chan, chan_common);
226}
227
228
229/*-- Controller ------------------------------------------------------*/
230
231/**
232 * struct at_dma - internal representation of an Atmel HDMA Controller
233 * @dma_common: common dmaengine dma_device object members
234 * @regs: memory mapped register base
235 * @clk: dma controller clock
236 * @all_chan_mask: all channels available in a mask
237 * @dma_desc_pool: base of DMA descriptor region (DMA address)
238 * @chan: channels table to store at_dma_chan structures
239 */
240struct at_dma {
241 struct dma_device dma_common;
242 void __iomem *regs;
243 struct clk *clk;
244
245 u8 all_chan_mask;
246
247 struct dma_pool *dma_desc_pool;
248 /* AT THE END channels table */
249 struct at_dma_chan chan[0];
250};
251
252#define dma_readl(atdma, name) \
253 __raw_readl((atdma)->regs + AT_DMA_##name)
254#define dma_writel(atdma, name, val) \
255 __raw_writel((val), (atdma)->regs + AT_DMA_##name)
256
257static inline struct at_dma *to_at_dma(struct dma_device *ddev)
258{
259 return container_of(ddev, struct at_dma, dma_common);
260}
261
262
263/*-- Helper functions ------------------------------------------------*/
264
265static struct device *chan2dev(struct dma_chan *chan)
266{
267 return &chan->dev->device;
268}
269static struct device *chan2parent(struct dma_chan *chan)
270{
271 return chan->dev->device.parent;
272}
273
274#if defined(VERBOSE_DEBUG)
275static void vdbg_dump_regs(struct at_dma_chan *atchan)
276{
277 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
278
279 dev_err(chan2dev(&atchan->chan_common),
280 " channel %d : imr = 0x%x, chsr = 0x%x\n",
281 atchan->chan_common.chan_id,
282 dma_readl(atdma, EBCIMR),
283 dma_readl(atdma, CHSR));
284
285 dev_err(chan2dev(&atchan->chan_common),
286 " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
287 channel_readl(atchan, SADDR),
288 channel_readl(atchan, DADDR),
289 channel_readl(atchan, CTRLA),
290 channel_readl(atchan, CTRLB),
291 channel_readl(atchan, CFG),
292 channel_readl(atchan, DSCR));
293}
294#else
295static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
296#endif
297
298static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
299{
300 dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
301 " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
302 lli->saddr, lli->daddr,
303 lli->ctrla, lli->ctrlb, lli->dscr);
304}
305
306
307static void atc_setup_irq(struct at_dma_chan *atchan, int on)
308{
309 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
310 u32 ebci;
311
312 /* enable interrupts on buffer chain completion & error */
313 ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
314 | AT_DMA_ERR(atchan->chan_common.chan_id);
315 if (on)
316 dma_writel(atdma, EBCIER, ebci);
317 else
318 dma_writel(atdma, EBCIDR, ebci);
319}
320
321static inline void atc_enable_irq(struct at_dma_chan *atchan)
322{
323 atc_setup_irq(atchan, 1);
324}
325
326static inline void atc_disable_irq(struct at_dma_chan *atchan)
327{
328 atc_setup_irq(atchan, 0);
329}
330
331
332/**
333 * atc_chan_is_enabled - test if given channel is enabled
334 * @atchan: channel we want to test status
335 */
336static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
337{
338 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
339
340 return !!(dma_readl(atdma, CHSR) & atchan->mask);
341}
342
343
344/**
345 * set_desc_eol - set end-of-link on a descriptor so it will end the transfer
346 * @desc: descriptor, single or at the end of a chain, to end the chain on
347 */
348static void set_desc_eol(struct at_desc *desc)
349{
350 desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
351 desc->lli.dscr = 0;
352}
353
354#endif /* AT_HDMAC_REGS_H */
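To make the CTRLA/CTRLB encoding above concrete, here is a small illustrative sketch of filling one hardware LLI for a memory-to-memory copy. It mirrors what atc_prep_dma_memcpy() and set_desc_eol() do for the word-aligned case; the assumption that the whole length fits one buffer transfer (len/4 <= ATC_BTSIZE_MAX) is mine, not something the driver relies on.

/* Illustrative sketch only: encode a single word-wide mem-to-mem LLI. */
static void fill_mem2mem_lli(struct at_lli *lli, dma_addr_t dst,
			     dma_addr_t src, size_t len)
{
	/* src, dst and len are assumed 32-bit aligned here */
	lli->saddr = src;
	lli->daddr = dst;
	lli->ctrla = ATC_SRC_WIDTH_WORD		/* 32-bit source reads */
		   | ATC_DST_WIDTH_WORD		/* 32-bit destination writes */
		   | ATC_BTSIZE(len >> 2);	/* buffer transfer size, counted in words */
	lli->ctrlb = ATC_SIF(0) | ATC_DIF(1)	/* same interfaces as ATC_DEFAULT_CTRLB */
		   | ATC_SRC_ADDR_MODE_INCR
		   | ATC_DST_ADDR_MODE_INCR
		   | ATC_FC_MEM2MEM		/* the DMA controller is the flow controller */
		   | ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;	/* end of link... */
	lli->dscr = 0;				/* ...no chained descriptor follows */
}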
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d5bc628d207c..bd0b248de2cf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -977,7 +977,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
977{ 977{
978 tx->chan = chan; 978 tx->chan = chan;
979 spin_lock_init(&tx->lock); 979 spin_lock_init(&tx->lock);
980 INIT_LIST_HEAD(&tx->tx_list);
981} 980}
982EXPORT_SYMBOL(dma_async_tx_descriptor_init); 981EXPORT_SYMBOL(dma_async_tx_descriptor_init);
983 982
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a3722a7384b5..a32a4cf7b1e0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO);
38MODULE_PARM_DESC(max_channels, 38MODULE_PARM_DESC(max_channels,
39 "Maximum number of channels to use (default: all)"); 39 "Maximum number of channels to use (default: all)");
40 40
41static unsigned int iterations;
42module_param(iterations, uint, S_IRUGO);
43MODULE_PARM_DESC(iterations,
44 "Iterations before stopping test (default: infinite)");
45
41static unsigned int xor_sources = 3; 46static unsigned int xor_sources = 3;
42module_param(xor_sources, uint, S_IRUGO); 47module_param(xor_sources, uint, S_IRUGO);
43MODULE_PARM_DESC(xor_sources, 48MODULE_PARM_DESC(xor_sources,
@@ -119,7 +124,7 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
119 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 124 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
120 for ( ; i < start + len; i++) 125 for ( ; i < start + len; i++)
121 buf[i] = PATTERN_SRC | PATTERN_COPY 126 buf[i] = PATTERN_SRC | PATTERN_COPY
122 | (~i & PATTERN_COUNT_MASK);; 127 | (~i & PATTERN_COUNT_MASK);
123 for ( ; i < test_buf_size; i++) 128 for ( ; i < test_buf_size; i++)
124 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 129 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
125 buf++; 130 buf++;
@@ -281,7 +286,8 @@ static int dmatest_func(void *data)
281 286
282 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; 287 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
283 288
284 while (!kthread_should_stop()) { 289 while (!kthread_should_stop()
290 && !(iterations && total_tests >= iterations)) {
285 struct dma_device *dev = chan->device; 291 struct dma_device *dev = chan->device;
286 struct dma_async_tx_descriptor *tx = NULL; 292 struct dma_async_tx_descriptor *tx = NULL;
287 dma_addr_t dma_srcs[src_cnt]; 293 dma_addr_t dma_srcs[src_cnt];
@@ -450,6 +456,13 @@ err_srcbuf:
450err_srcs: 456err_srcs:
451 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 457 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
452 thread_name, total_tests, failed_tests, ret); 458 thread_name, total_tests, failed_tests, ret);
459
460 if (iterations > 0)
461 while (!kthread_should_stop()) {
462 DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
463 interruptible_sleep_on(&wait_dmatest_exit);
464 }
465
453 return ret; 466 return ret;
454} 467}
455 468
@@ -531,11 +544,11 @@ static int dmatest_add_channel(struct dma_chan *chan)
531 544
532 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 545 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
533 cnt = dmatest_add_threads(dtc, DMA_MEMCPY); 546 cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
534 thread_count += cnt > 0 ?: 0; 547 thread_count += cnt > 0 ? cnt : 0;
535 } 548 }
536 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 549 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
537 cnt = dmatest_add_threads(dtc, DMA_XOR); 550 cnt = dmatest_add_threads(dtc, DMA_XOR);
538 thread_count += cnt > 0 ?: 0; 551 thread_count += cnt > 0 ? cnt : 0;
539 } 552 }
540 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 553 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
541 cnt = dmatest_add_threads(dtc, DMA_PQ); 554 cnt = dmatest_add_threads(dtc, DMA_PQ);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 98c9a847bf51..8fb748280361 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -116,7 +116,7 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
116{ 116{
117 struct dw_desc *child; 117 struct dw_desc *child;
118 118
119 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 119 list_for_each_entry(child, &desc->tx_list, desc_node)
120 dma_sync_single_for_cpu(chan2parent(&dwc->chan), 120 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
121 child->txd.phys, sizeof(child->lli), 121 child->txd.phys, sizeof(child->lli),
122 DMA_TO_DEVICE); 122 DMA_TO_DEVICE);
@@ -137,11 +137,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
137 dwc_sync_desc_for_cpu(dwc, desc); 137 dwc_sync_desc_for_cpu(dwc, desc);
138 138
139 spin_lock_bh(&dwc->lock); 139 spin_lock_bh(&dwc->lock);
140 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 140 list_for_each_entry(child, &desc->tx_list, desc_node)
141 dev_vdbg(chan2dev(&dwc->chan), 141 dev_vdbg(chan2dev(&dwc->chan),
142 "moving child desc %p to freelist\n", 142 "moving child desc %p to freelist\n",
143 child); 143 child);
144 list_splice_init(&desc->txd.tx_list, &dwc->free_list); 144 list_splice_init(&desc->tx_list, &dwc->free_list);
145 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); 145 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
146 list_add(&desc->desc_node, &dwc->free_list); 146 list_add(&desc->desc_node, &dwc->free_list);
147 spin_unlock_bh(&dwc->lock); 147 spin_unlock_bh(&dwc->lock);
@@ -209,19 +209,28 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
209 param = txd->callback_param; 209 param = txd->callback_param;
210 210
211 dwc_sync_desc_for_cpu(dwc, desc); 211 dwc_sync_desc_for_cpu(dwc, desc);
212 list_splice_init(&txd->tx_list, &dwc->free_list); 212 list_splice_init(&desc->tx_list, &dwc->free_list);
213 list_move(&desc->desc_node, &dwc->free_list); 213 list_move(&desc->desc_node, &dwc->free_list);
214 214
215 /* 215 if (!dwc->chan.private) {
216 * We use dma_unmap_page() regardless of how the buffers were 216 struct device *parent = chan2parent(&dwc->chan);
217 * mapped before they were submitted... 217 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
218 */ 218 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
219 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) 219 dma_unmap_single(parent, desc->lli.dar,
220 dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar, 220 desc->len, DMA_FROM_DEVICE);
221 desc->len, DMA_FROM_DEVICE); 221 else
222 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) 222 dma_unmap_page(parent, desc->lli.dar,
223 dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar, 223 desc->len, DMA_FROM_DEVICE);
224 desc->len, DMA_TO_DEVICE); 224 }
225 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
226 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
227 dma_unmap_single(parent, desc->lli.sar,
228 desc->len, DMA_TO_DEVICE);
229 else
230 dma_unmap_page(parent, desc->lli.sar,
231 desc->len, DMA_TO_DEVICE);
232 }
233 }
225 234
226 /* 235 /*
227 * The API requires that no submissions are done from a 236 * The API requires that no submissions are done from a
@@ -289,7 +298,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
289 /* This one is currently in progress */ 298 /* This one is currently in progress */
290 return; 299 return;
291 300
292 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 301 list_for_each_entry(child, &desc->tx_list, desc_node)
293 if (child->lli.llp == llp) 302 if (child->lli.llp == llp)
294 /* Currently in progress */ 303 /* Currently in progress */
295 return; 304 return;
@@ -356,7 +365,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
356 dev_printk(KERN_CRIT, chan2dev(&dwc->chan), 365 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
357 " cookie: %d\n", bad_desc->txd.cookie); 366 " cookie: %d\n", bad_desc->txd.cookie);
358 dwc_dump_lli(dwc, &bad_desc->lli); 367 dwc_dump_lli(dwc, &bad_desc->lli);
359 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) 368 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
360 dwc_dump_lli(dwc, &child->lli); 369 dwc_dump_lli(dwc, &child->lli);
361 370
362 /* Pretend the descriptor completed successfully */ 371 /* Pretend the descriptor completed successfully */
@@ -608,7 +617,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
608 prev->txd.phys, sizeof(prev->lli), 617 prev->txd.phys, sizeof(prev->lli),
609 DMA_TO_DEVICE); 618 DMA_TO_DEVICE);
610 list_add_tail(&desc->desc_node, 619 list_add_tail(&desc->desc_node,
611 &first->txd.tx_list); 620 &first->tx_list);
612 } 621 }
613 prev = desc; 622 prev = desc;
614 } 623 }
@@ -658,8 +667,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
658 reg_width = dws->reg_width; 667 reg_width = dws->reg_width;
659 prev = first = NULL; 668 prev = first = NULL;
660 669
661 sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
662
663 switch (direction) { 670 switch (direction) {
664 case DMA_TO_DEVICE: 671 case DMA_TO_DEVICE:
665 ctllo = (DWC_DEFAULT_CTLLO 672 ctllo = (DWC_DEFAULT_CTLLO
@@ -700,7 +707,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
700 sizeof(prev->lli), 707 sizeof(prev->lli),
701 DMA_TO_DEVICE); 708 DMA_TO_DEVICE);
702 list_add_tail(&desc->desc_node, 709 list_add_tail(&desc->desc_node,
703 &first->txd.tx_list); 710 &first->tx_list);
704 } 711 }
705 prev = desc; 712 prev = desc;
706 total_len += len; 713 total_len += len;
@@ -746,7 +753,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
746 sizeof(prev->lli), 753 sizeof(prev->lli),
747 DMA_TO_DEVICE); 754 DMA_TO_DEVICE);
748 list_add_tail(&desc->desc_node, 755 list_add_tail(&desc->desc_node,
749 &first->txd.tx_list); 756 &first->tx_list);
750 } 757 }
751 prev = desc; 758 prev = desc;
752 total_len += len; 759 total_len += len;
@@ -902,6 +909,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
902 break; 909 break;
903 } 910 }
904 911
912 INIT_LIST_HEAD(&desc->tx_list);
905 dma_async_tx_descriptor_init(&desc->txd, chan); 913 dma_async_tx_descriptor_init(&desc->txd, chan);
906 desc->txd.tx_submit = dwc_tx_submit; 914 desc->txd.tx_submit = dwc_tx_submit;
907 desc->txd.flags = DMA_CTRL_ACK; 915 desc->txd.flags = DMA_CTRL_ACK;
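The dwc_descriptor_complete() hunk above changes the completion path in two ways: unmapping is skipped entirely when the channel carries a private slave configuration (the client owns those mappings), and the DMA_COMPL_DEST_UNMAP_SINGLE / DMA_COMPL_SRC_UNMAP_SINGLE flags now select dma_unmap_single() over dma_unmap_page(), matching how the buffer was originally mapped. A hedged sketch of that selection logic follows; the helper name is illustrative and does not appear in the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative helper (name not from the patch): pick the unmap
 * primitive that matches the mapping primitive the client used,
 * as the completion path above now does for non-slave transfers.
 */
static void example_unmap(struct device *parent, dma_addr_t addr, size_t len,
			  enum dma_data_direction dir, bool mapped_single)
{
	if (mapped_single)
		dma_unmap_single(parent, addr, len, dir);
	else
		dma_unmap_page(parent, addr, len, dir);
}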
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 13a580767031..d9a939f67f46 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -217,6 +217,7 @@ struct dw_desc {
217 217
218 /* THEN values for driver housekeeping */ 218 /* THEN values for driver housekeeping */
219 struct list_head desc_node; 219 struct list_head desc_node;
220 struct list_head tx_list;
220 struct dma_async_tx_descriptor txd; 221 struct dma_async_tx_descriptor txd;
221 size_t len; 222 size_t len;
222}; 223};
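Across this series the per-transaction chain moves out of dma_async_tx_descriptor (txd.tx_list / async_tx.tx_list) and into a driver-private tx_list member, as the struct dw_desc change above shows; dw_dmac, fsldma, ioat, iop-adma and mv_xor all follow the same pattern. The easy-to-miss requirement is that the new list head must be initialised when the descriptor is created, before anything is spliced onto or off of it. A generic, illustrative-only sketch (these struct and function names belong to no driver in this diff):

#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative descriptor carrying its own transaction chain. */
struct example_desc {
	struct list_head node;		/* free-list / active-chain membership */
	struct list_head tx_list;	/* children of a multi-descriptor transfer */
};

static struct example_desc *example_desc_alloc(gfp_t flags)
{
	struct example_desc *d = kzalloc(sizeof(*d), flags);

	if (d)
		INIT_LIST_HEAD(&d->tx_list);	/* must happen once, at allocation */
	return d;
}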
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f18d1bde0439..296f9e747fac 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -12,6 +12,11 @@
12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. 12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
13 * The support for MPC8349 DMA contorller is also added. 13 * The support for MPC8349 DMA contorller is also added.
14 * 14 *
15 * This driver instructs the DMA controller to issue the PCI Read Multiple
16 * command for PCI read operations, instead of using the default PCI Read Line
17 * command. Please be aware that this setting may result in read pre-fetching
18 * on some platforms.
19 *
15 * This is free software; you can redistribute it and/or modify 20 * This is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by 21 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or 22 * the Free Software Foundation; either version 2 of the License, or
@@ -29,6 +34,7 @@
29#include <linux/dmapool.h> 34#include <linux/dmapool.h>
30#include <linux/of_platform.h> 35#include <linux/of_platform.h>
31 36
37#include <asm/fsldma.h>
32#include "fsldma.h" 38#include "fsldma.h"
33 39
34static void dma_init(struct fsl_dma_chan *fsl_chan) 40static void dma_init(struct fsl_dma_chan *fsl_chan)
@@ -49,9 +55,10 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
49 case FSL_DMA_IP_83XX: 55 case FSL_DMA_IP_83XX:
50 /* Set the channel to below modes: 56 /* Set the channel to below modes:
51 * EOTIE - End-of-transfer interrupt enable 57 * EOTIE - End-of-transfer interrupt enable
58 * PRC_RM - PCI read multiple
52 */ 59 */
53 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, 60 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
54 32); 61 | FSL_DMA_MR_PRC_RM, 32);
55 break; 62 break;
56 } 63 }
57 64
@@ -136,15 +143,16 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
136 143
137static void dma_start(struct fsl_dma_chan *fsl_chan) 144static void dma_start(struct fsl_dma_chan *fsl_chan)
138{ 145{
139 u32 mr_set = 0;; 146 u32 mr_set = 0;
140 147
141 if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 148 if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
142 DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); 149 DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
143 mr_set |= FSL_DMA_MR_EMP_EN; 150 mr_set |= FSL_DMA_MR_EMP_EN;
144 } else 151 } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
145 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 152 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
146 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 153 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
147 & ~FSL_DMA_MR_EMP_EN, 32); 154 & ~FSL_DMA_MR_EMP_EN, 32);
155 }
148 156
149 if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) 157 if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
150 mr_set |= FSL_DMA_MR_EMS_EN; 158 mr_set |= FSL_DMA_MR_EMS_EN;
@@ -273,28 +281,40 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
273} 281}
274 282
275/** 283/**
276 * fsl_chan_toggle_ext_pause - Toggle channel external pause status 284 * fsl_chan_set_request_count - Set DMA Request Count for external control
277 * @fsl_chan : Freescale DMA channel 285 * @fsl_chan : Freescale DMA channel
278 * @size : Pause control size, 0 for disable external pause control. 286 * @size : Number of bytes to transfer in a single request
279 * The maximum is 1024. 287 *
288 * The Freescale DMA channel can be controlled by the external signal DREQ#.
289 * The DMA request count is how many bytes are allowed to transfer before
290 * pausing the channel, after which a new assertion of DREQ# resumes channel
291 * operation.
280 * 292 *
281 * The Freescale DMA channel can be controlled by the external 293 * A size of 0 disables external pause control. The maximum size is 1024.
282 * signal DREQ#. The pause control size is how many bytes are allowed
283 * to transfer before pausing the channel, after which a new assertion
284 * of DREQ# resumes channel operation.
285 */ 294 */
286static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) 295static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
287{ 296{
288 if (size > 1024) 297 BUG_ON(size > 1024);
289 return; 298 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
299 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
300 | ((__ilog2(size) << 24) & 0x0f000000),
301 32);
302}
290 303
291 if (size) { 304/**
292 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 305 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
293 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 306 * @fsl_chan : Freescale DMA channel
294 | ((__ilog2(size) << 24) & 0x0f000000), 307 * @enable : 0 is disabled, 1 is enabled.
295 32); 308 *
309 * The Freescale DMA channel can be controlled by the external signal DREQ#.
310 * The DMA Request Count feature should be used in addition to this feature
311 * to set the number of bytes to transfer before pausing the channel.
312 */
313static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
314{
315 if (enable)
296 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; 316 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
297 } else 317 else
298 fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; 318 fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
299} 319}
300 320
@@ -319,7 +339,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
319static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) 339static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
320{ 340{
321 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); 341 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
322 struct fsl_desc_sw *desc; 342 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
343 struct fsl_desc_sw *child;
323 unsigned long flags; 344 unsigned long flags;
324 dma_cookie_t cookie; 345 dma_cookie_t cookie;
325 346
@@ -327,7 +348,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
327 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 348 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
328 349
329 cookie = fsl_chan->common.cookie; 350 cookie = fsl_chan->common.cookie;
330 list_for_each_entry(desc, &tx->tx_list, node) { 351 list_for_each_entry(child, &desc->tx_list, node) {
331 cookie++; 352 cookie++;
332 if (cookie < 0) 353 if (cookie < 0)
333 cookie = 1; 354 cookie = 1;
@@ -336,8 +357,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
336 } 357 }
337 358
338 fsl_chan->common.cookie = cookie; 359 fsl_chan->common.cookie = cookie;
339 append_ld_queue(fsl_chan, tx_to_fsl_desc(tx)); 360 append_ld_queue(fsl_chan, desc);
340 list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev); 361 list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
341 362
342 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 363 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
343 364
@@ -359,6 +380,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
359 desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); 380 desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
360 if (desc_sw) { 381 if (desc_sw) {
361 memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); 382 memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
383 INIT_LIST_HEAD(&desc_sw->tx_list);
362 dma_async_tx_descriptor_init(&desc_sw->async_tx, 384 dma_async_tx_descriptor_init(&desc_sw->async_tx,
363 &fsl_chan->common); 385 &fsl_chan->common);
364 desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; 386 desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
@@ -448,7 +470,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
448 new->async_tx.flags = flags; 470 new->async_tx.flags = flags;
449 471
450 /* Insert the link descriptor to the LD ring */ 472 /* Insert the link descriptor to the LD ring */
451 list_add_tail(&new->node, &new->async_tx.tx_list); 473 list_add_tail(&new->node, &new->tx_list);
452 474
453 /* Set End-of-link to the last link descriptor of new list*/ 475 /* Set End-of-link to the last link descriptor of new list*/
454 set_ld_eol(fsl_chan, new); 476 set_ld_eol(fsl_chan, new);
@@ -506,7 +528,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
506 dma_dest += copy; 528 dma_dest += copy;
507 529
508 /* Insert the link descriptor to the LD ring */ 530 /* Insert the link descriptor to the LD ring */
509 list_add_tail(&new->node, &first->async_tx.tx_list); 531 list_add_tail(&new->node, &first->tx_list);
510 } while (len); 532 } while (len);
511 533
512 new->async_tx.flags = flags; /* client is in control of this ack */ 534 new->async_tx.flags = flags; /* client is in control of this ack */
@@ -521,7 +543,7 @@ fail:
521 if (!first) 543 if (!first)
522 return NULL; 544 return NULL;
523 545
524 list = &first->async_tx.tx_list; 546 list = &first->tx_list;
525 list_for_each_entry_safe_reverse(new, prev, list, node) { 547 list_for_each_entry_safe_reverse(new, prev, list, node) {
526 list_del(&new->node); 548 list_del(&new->node);
527 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); 549 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
@@ -531,6 +553,229 @@ fail:
531} 553}
532 554
533/** 555/**
556 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
557 * @chan: DMA channel
558 * @sgl: scatterlist to transfer to/from
559 * @sg_len: number of entries in @scatterlist
560 * @direction: DMA direction
561 * @flags: DMAEngine flags
562 *
563 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
564 * DMA_SLAVE API, this gets the device-specific information from the
565 * chan->private variable.
566 */
567static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
568 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
569 enum dma_data_direction direction, unsigned long flags)
570{
571 struct fsl_dma_chan *fsl_chan;
572 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
573 struct fsl_dma_slave *slave;
574 struct list_head *tx_list;
575 size_t copy;
576
577 int i;
578 struct scatterlist *sg;
579 size_t sg_used;
580 size_t hw_used;
581 struct fsl_dma_hw_addr *hw;
582 dma_addr_t dma_dst, dma_src;
583
584 if (!chan)
585 return NULL;
586
587 if (!chan->private)
588 return NULL;
589
590 fsl_chan = to_fsl_chan(chan);
591 slave = chan->private;
592
593 if (list_empty(&slave->addresses))
594 return NULL;
595
596 hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
597 hw_used = 0;
598
599 /*
600 * Build the hardware transaction to copy from the scatterlist to
601 * the hardware, or from the hardware to the scatterlist
602 *
603 * If you are copying from the hardware to the scatterlist and it
604 * takes two hardware entries to fill an entire page, then both
605 * hardware entries will be coalesced into the same page
606 *
607 * If you are copying from the scatterlist to the hardware and a
608 * single page can fill two hardware entries, then the data will
609 * be read out of the page into the first hardware entry, and so on
610 */
611 for_each_sg(sgl, sg, sg_len, i) {
612 sg_used = 0;
613
614 /* Loop until the entire scatterlist entry is used */
615 while (sg_used < sg_dma_len(sg)) {
616
617 /*
618 * If we've used up the current hardware address/length
619 * pair, we need to load a new one
620 *
621 * This is done in a while loop so that descriptors with
622 * length == 0 will be skipped
623 */
624 while (hw_used >= hw->length) {
625
626 /*
627 * If the current hardware entry is the last
628 * entry in the list, we're finished
629 */
630 if (list_is_last(&hw->entry, &slave->addresses))
631 goto finished;
632
633 /* Get the next hardware address/length pair */
634 hw = list_entry(hw->entry.next,
635 struct fsl_dma_hw_addr, entry);
636 hw_used = 0;
637 }
638
639 /* Allocate the link descriptor from DMA pool */
640 new = fsl_dma_alloc_descriptor(fsl_chan);
641 if (!new) {
642 dev_err(fsl_chan->dev, "No free memory for "
643 "link descriptor\n");
644 goto fail;
645 }
646#ifdef FSL_DMA_LD_DEBUG
647 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
648#endif
649
650 /*
651 * Calculate the maximum number of bytes to transfer,
652 * making sure it is less than the DMA controller limit
653 */
654 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
655 hw->length - hw_used);
656 copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
657
658 /*
659 * DMA_FROM_DEVICE
660 * from the hardware to the scatterlist
661 *
662 * DMA_TO_DEVICE
663 * from the scatterlist to the hardware
664 */
665 if (direction == DMA_FROM_DEVICE) {
666 dma_src = hw->address + hw_used;
667 dma_dst = sg_dma_address(sg) + sg_used;
668 } else {
669 dma_src = sg_dma_address(sg) + sg_used;
670 dma_dst = hw->address + hw_used;
671 }
672
673 /* Fill in the descriptor */
674 set_desc_cnt(fsl_chan, &new->hw, copy);
675 set_desc_src(fsl_chan, &new->hw, dma_src);
676 set_desc_dest(fsl_chan, &new->hw, dma_dst);
677
678 /*
679 * If this is not the first descriptor, chain the
680 * current descriptor after the previous descriptor
681 */
682 if (!first) {
683 first = new;
684 } else {
685 set_desc_next(fsl_chan, &prev->hw,
686 new->async_tx.phys);
687 }
688
689 new->async_tx.cookie = 0;
690 async_tx_ack(&new->async_tx);
691
692 prev = new;
693 sg_used += copy;
694 hw_used += copy;
695
696 /* Insert the link descriptor into the LD ring */
697 list_add_tail(&new->node, &first->tx_list);
698 }
699 }
700
701finished:
702
703 /* All of the hardware address/length pairs had length == 0 */
704 if (!first || !new)
705 return NULL;
706
707 new->async_tx.flags = flags;
708 new->async_tx.cookie = -EBUSY;
709
710 /* Set End-of-link to the last link descriptor of new list */
711 set_ld_eol(fsl_chan, new);
712
713 /* Enable extra controller features */
714 if (fsl_chan->set_src_loop_size)
715 fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
716
717 if (fsl_chan->set_dest_loop_size)
718 fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
719
720 if (fsl_chan->toggle_ext_start)
721 fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
722
723 if (fsl_chan->toggle_ext_pause)
724 fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
725
726 if (fsl_chan->set_request_count)
727 fsl_chan->set_request_count(fsl_chan, slave->request_count);
728
729 return &first->async_tx;
730
731fail:
732 /* If first was not set, then we failed to allocate the very first
733 * descriptor, and we're done */
734 if (!first)
735 return NULL;
736
737 /*
738 * First is set, so all of the descriptors we allocated have been added
739 * to first->tx_list, INCLUDING "first" itself. Therefore we
740 * must traverse the list backwards freeing each descriptor in turn
741 *
742 * We're re-using variables for the loop, oh well
743 */
744 tx_list = &first->tx_list;
745 list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
746 list_del_init(&new->node);
747 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
748 }
749
750 return NULL;
751}
752
753static void fsl_dma_device_terminate_all(struct dma_chan *chan)
754{
755 struct fsl_dma_chan *fsl_chan;
756 struct fsl_desc_sw *desc, *tmp;
757 unsigned long flags;
758
759 if (!chan)
760 return;
761
762 fsl_chan = to_fsl_chan(chan);
763
764 /* Halt the DMA engine */
765 dma_halt(fsl_chan);
766
767 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
768
769 /* Remove and free all of the descriptors in the LD queue */
770 list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
771 list_del(&desc->node);
772 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
773 }
774
775 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
776}
777
778/**
534 * fsl_dma_update_completed_cookie - Update the completed cookie. 779 * fsl_dma_update_completed_cookie - Update the completed cookie.
535 * @fsl_chan : Freescale DMA channel 780 * @fsl_chan : Freescale DMA channel
536 */ 781 */
@@ -871,11 +1116,12 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
871 1116
872 switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { 1117 switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
873 case FSL_DMA_IP_85XX: 1118 case FSL_DMA_IP_85XX:
874 new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
875 new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1119 new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
876 case FSL_DMA_IP_83XX: 1120 case FSL_DMA_IP_83XX:
1121 new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
877 new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1122 new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
878 new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; 1123 new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
1124 new_fsl_chan->set_request_count = fsl_chan_set_request_count;
879 } 1125 }
880 1126
881 spin_lock_init(&new_fsl_chan->desc_lock); 1127 spin_lock_init(&new_fsl_chan->desc_lock);
@@ -955,12 +1201,15 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
955 1201
956 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1202 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
957 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1203 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
1204 dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
958 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1205 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
959 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1206 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
960 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1207 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
961 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1208 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
962 fdev->common.device_is_tx_complete = fsl_dma_is_complete; 1209 fdev->common.device_is_tx_complete = fsl_dma_is_complete;
963 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1210 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1211 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1212 fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
964 fdev->common.dev = &dev->dev; 1213 fdev->common.dev = &dev->dev;
965 1214
966 fdev->irq = irq_of_parse_and_map(dev->node, 0); 1215 fdev->irq = irq_of_parse_and_map(dev->node, 0);
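fsl_chan_set_request_count() above stores the external-request byte count as a power of two: (__ilog2(size) << 24) & 0x0f000000 places log2(size) into bits 27:24 of the mode register, and the BUG_ON() caps size at 1024. A standalone arithmetic check of that encoding (plain userspace C, not kernel code; ilog2_u32() stands in for the kernel's __ilog2()):

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)	/* stand-in for __ilog2() */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int sizes[] = { 64, 256, 1024 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int mr_bits = (ilog2_u32(sizes[i]) << 24) & 0x0f000000;

		printf("size %4u -> MR[27:24] field 0x%08x\n", sizes[i], mr_bits);
	}
	return 0;
}

So a request count of 64 encodes as 0x06000000, 256 as 0x08000000 and 1024 as 0x0a000000, which is then ORed into the existing mode-register value.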
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4f21a512d848..0df14cbb8ca3 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -38,6 +38,7 @@
38 38
39/* Special MR definition for MPC8349 */ 39/* Special MR definition for MPC8349 */
40#define FSL_DMA_MR_EOTIE 0x00000080 40#define FSL_DMA_MR_EOTIE 0x00000080
41#define FSL_DMA_MR_PRC_RM 0x00000800
41 42
42#define FSL_DMA_SR_CH 0x00000020 43#define FSL_DMA_SR_CH 0x00000020
43#define FSL_DMA_SR_PE 0x00000010 44#define FSL_DMA_SR_PE 0x00000010
@@ -89,6 +90,7 @@ struct fsl_dma_ld_hw {
89struct fsl_desc_sw { 90struct fsl_desc_sw {
90 struct fsl_dma_ld_hw hw; 91 struct fsl_dma_ld_hw hw;
91 struct list_head node; 92 struct list_head node;
93 struct list_head tx_list;
92 struct dma_async_tx_descriptor async_tx; 94 struct dma_async_tx_descriptor async_tx;
93 struct list_head *ld; 95 struct list_head *ld;
94 void *priv; 96 void *priv;
@@ -142,10 +144,11 @@ struct fsl_dma_chan {
142 struct tasklet_struct tasklet; 144 struct tasklet_struct tasklet;
143 u32 feature; 145 u32 feature;
144 146
145 void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); 147 void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
146 void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); 148 void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
147 void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); 149 void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
148 void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); 150 void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
151 void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
149}; 152};
150 153
151#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) 154#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 32a757be75c1..c524d36d3c2e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -251,12 +251,12 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
251 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); 251 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
252 252
253 /* write address into NextDescriptor field of last desc in chain */ 253 /* write address into NextDescriptor field of last desc in chain */
254 first = to_ioat_desc(tx->tx_list.next); 254 first = to_ioat_desc(desc->tx_list.next);
255 chain_tail = to_ioat_desc(ioat->used_desc.prev); 255 chain_tail = to_ioat_desc(ioat->used_desc.prev);
256 /* make descriptor updates globally visible before chaining */ 256 /* make descriptor updates globally visible before chaining */
257 wmb(); 257 wmb();
258 chain_tail->hw->next = first->txd.phys; 258 chain_tail->hw->next = first->txd.phys;
259 list_splice_tail_init(&tx->tx_list, &ioat->used_desc); 259 list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
260 dump_desc_dbg(ioat, chain_tail); 260 dump_desc_dbg(ioat, chain_tail);
261 dump_desc_dbg(ioat, first); 261 dump_desc_dbg(ioat, first);
262 262
@@ -298,6 +298,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
298 298
299 memset(desc, 0, sizeof(*desc)); 299 memset(desc, 0, sizeof(*desc));
300 300
301 INIT_LIST_HEAD(&desc_sw->tx_list);
301 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); 302 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
302 desc_sw->txd.tx_submit = ioat1_tx_submit; 303 desc_sw->txd.tx_submit = ioat1_tx_submit;
303 desc_sw->hw = desc; 304 desc_sw->hw = desc;
@@ -522,7 +523,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
522 523
523 desc->txd.flags = flags; 524 desc->txd.flags = flags;
524 desc->len = total_len; 525 desc->len = total_len;
525 list_splice(&chain, &desc->txd.tx_list); 526 list_splice(&chain, &desc->tx_list);
526 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 527 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
527 hw->ctl_f.compl_write = 1; 528 hw->ctl_f.compl_write = 1;
528 hw->tx_cnt = tx_cnt; 529 hw->tx_cnt = tx_cnt;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 0e37e426c729..6a675a2a2d1c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -171,7 +171,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
171 * struct ioat_desc_sw - wrapper around hardware descriptor 171 * struct ioat_desc_sw - wrapper around hardware descriptor
172 * @hw: hardware DMA descriptor (for memcpy) 172 * @hw: hardware DMA descriptor (for memcpy)
173 * @node: this descriptor will either be on the free list, 173 * @node: this descriptor will either be on the free list,
174 * or attached to a transaction list (async_tx.tx_list) 174 * or attached to a transaction list (tx_list)
175 * @txd: the generic software descriptor for all engines 175 * @txd: the generic software descriptor for all engines
176 * @id: identifier for debug 176 * @id: identifier for debug
177 */ 177 */
@@ -179,6 +179,7 @@ struct ioat_desc_sw {
179 struct ioat_dma_descriptor *hw; 179 struct ioat_dma_descriptor *hw;
180 struct list_head node; 180 struct list_head node;
181 size_t len; 181 size_t len;
182 struct list_head tx_list;
182 struct dma_async_tx_descriptor txd; 183 struct dma_async_tx_descriptor txd;
183 #ifdef DEBUG 184 #ifdef DEBUG
184 int id; 185 int id;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 7bbbd83d12e6..5d6ac49e0d32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -397,11 +397,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
397 return NULL; 397 return NULL;
398 memset(hw, 0, sizeof(*hw)); 398 memset(hw, 0, sizeof(*hw));
399 399
400 desc = kzalloc(sizeof(*desc), flags); 400 desc = kmem_cache_alloc(ioat2_cache, flags);
401 if (!desc) { 401 if (!desc) {
402 pci_pool_free(dma->dma_pool, hw, phys); 402 pci_pool_free(dma->dma_pool, hw, phys);
403 return NULL; 403 return NULL;
404 } 404 }
405 memset(desc, 0, sizeof(*desc));
405 406
406 dma_async_tx_descriptor_init(&desc->txd, chan); 407 dma_async_tx_descriptor_init(&desc->txd, chan);
407 desc->txd.tx_submit = ioat2_tx_submit_unlock; 408 desc->txd.tx_submit = ioat2_tx_submit_unlock;
@@ -416,7 +417,7 @@ static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *cha
416 417
417 dma = to_ioatdma_device(chan->device); 418 dma = to_ioatdma_device(chan->device);
418 pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); 419 pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
419 kfree(desc); 420 kmem_cache_free(ioat2_cache, desc);
420} 421}
421 422
422static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) 423static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 246e646b1904..1d849ef74d5f 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -142,8 +142,8 @@ struct ioat_ring_ent {
142 struct ioat_pq_update_descriptor *pqu; 142 struct ioat_pq_update_descriptor *pqu;
143 struct ioat_raw_descriptor *raw; 143 struct ioat_raw_descriptor *raw;
144 }; 144 };
145 struct dma_async_tx_descriptor txd;
146 size_t len; 145 size_t len;
146 struct dma_async_tx_descriptor txd;
147 enum sum_check_flags *result; 147 enum sum_check_flags *result;
148 #ifdef DEBUG 148 #ifdef DEBUG
149 int id; 149 int id;
@@ -186,4 +186,5 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
186void ioat2_cleanup_tasklet(unsigned long data); 186void ioat2_cleanup_tasklet(unsigned long data);
187void ioat2_timer_event(unsigned long data); 187void ioat2_timer_event(unsigned long data);
188extern struct kobj_type ioat2_ktype; 188extern struct kobj_type ioat2_ktype;
189extern struct kmem_cache *ioat2_cache;
189#endif /* IOATDMA_V2_H */ 190#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index b77d3a2864ad..c788fa266470 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -83,6 +83,8 @@ static int ioat_dca_enabled = 1;
83module_param(ioat_dca_enabled, int, 0644); 83module_param(ioat_dca_enabled, int, 0644);
84MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 84MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
85 85
86struct kmem_cache *ioat2_cache;
87
86#define DRV_NAME "ioatdma" 88#define DRV_NAME "ioatdma"
87 89
88static struct pci_driver ioat_pci_driver = { 90static struct pci_driver ioat_pci_driver = {
@@ -182,15 +184,27 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
182 184
183static int __init ioat_init_module(void) 185static int __init ioat_init_module(void)
184{ 186{
187 int err;
188
185 pr_info("%s: Intel(R) QuickData Technology Driver %s\n", 189 pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
186 DRV_NAME, IOAT_DMA_VERSION); 190 DRV_NAME, IOAT_DMA_VERSION);
187 191
188 return pci_register_driver(&ioat_pci_driver); 192 ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
193 0, SLAB_HWCACHE_ALIGN, NULL);
194 if (!ioat2_cache)
195 return -ENOMEM;
196
197 err = pci_register_driver(&ioat_pci_driver);
198 if (err)
199 kmem_cache_destroy(ioat2_cache);
200
201 return err;
189} 202}
190module_init(ioat_init_module); 203module_init(ioat_init_module);
191 204
192static void __exit ioat_exit_module(void) 205static void __exit ioat_exit_module(void)
193{ 206{
194 pci_unregister_driver(&ioat_pci_driver); 207 pci_unregister_driver(&ioat_pci_driver);
208 kmem_cache_destroy(ioat2_cache);
195} 209}
196module_exit(ioat_exit_module); 210module_exit(ioat_exit_module);
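The ioat/pci.c hunk above gives module init a second resource to manage: the ioat2_cache kmem_cache must exist before the PCI driver registers (probe can allocate ring entries from it), must be torn down if registration fails, and is destroyed again on module exit only after the driver is unregistered. A generic sketch of that ordering, using placeholder example_* names rather than the real ioat symbols:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_obj {
	int payload;
};

static struct kmem_cache *example_cache;

static int example_register(void)	/* stands in for pci_register_driver() */
{
	return 0;
}

static void example_unregister(void)	/* stands in for pci_unregister_driver() */
{
}

static int __init example_init(void)
{
	int err;

	/* Create the cache first: registration may trigger probes that allocate from it. */
	example_cache = kmem_cache_create("example_cache",
					  sizeof(struct example_obj), 0,
					  SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cache)
		return -ENOMEM;

	err = example_register();
	if (err)
		kmem_cache_destroy(example_cache);	/* unwind on failure */

	return err;
}

static void __exit example_exit(void)
{
	example_unregister();			/* no more users of the cache */
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");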
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 518f557ef857..645ca8d54ec4 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -421,7 +421,7 @@ retry:
421 } 421 }
422 alloc_tail->group_head = alloc_start; 422 alloc_tail->group_head = alloc_start;
423 alloc_tail->async_tx.cookie = -EBUSY; 423 alloc_tail->async_tx.cookie = -EBUSY;
424 list_splice(&chain, &alloc_tail->async_tx.tx_list); 424 list_splice(&chain, &alloc_tail->tx_list);
425 iop_chan->last_used = last_used; 425 iop_chan->last_used = last_used;
426 iop_desc_clear_next_desc(alloc_start); 426 iop_desc_clear_next_desc(alloc_start);
427 iop_desc_clear_next_desc(alloc_tail); 427 iop_desc_clear_next_desc(alloc_tail);
@@ -480,7 +480,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
480 480
481 old_chain_tail = list_entry(iop_chan->chain.prev, 481 old_chain_tail = list_entry(iop_chan->chain.prev,
482 struct iop_adma_desc_slot, chain_node); 482 struct iop_adma_desc_slot, chain_node);
483 list_splice_init(&sw_desc->async_tx.tx_list, 483 list_splice_init(&sw_desc->tx_list,
484 &old_chain_tail->chain_node); 484 &old_chain_tail->chain_node);
485 485
486 /* fix up the hardware chain */ 486 /* fix up the hardware chain */
@@ -547,6 +547,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
547 547
548 dma_async_tx_descriptor_init(&slot->async_tx, chan); 548 dma_async_tx_descriptor_init(&slot->async_tx, chan);
549 slot->async_tx.tx_submit = iop_adma_tx_submit; 549 slot->async_tx.tx_submit = iop_adma_tx_submit;
550 INIT_LIST_HEAD(&slot->tx_list);
550 INIT_LIST_HEAD(&slot->chain_node); 551 INIT_LIST_HEAD(&slot->chain_node);
551 INIT_LIST_HEAD(&slot->slot_node); 552 INIT_LIST_HEAD(&slot->slot_node);
552 hw_desc = (char *) iop_chan->device->dma_desc_pool; 553 hw_desc = (char *) iop_chan->device->dma_desc_pool;
@@ -1642,7 +1643,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1642 if (sw_desc) { 1643 if (sw_desc) {
1643 grp_start = sw_desc->group_head; 1644 grp_start = sw_desc->group_head;
1644 1645
1645 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); 1646 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1646 async_tx_ack(&sw_desc->async_tx); 1647 async_tx_ack(&sw_desc->async_tx);
1647 iop_desc_init_memcpy(grp_start, 0); 1648 iop_desc_init_memcpy(grp_start, 0);
1648 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1649 iop_desc_set_byte_count(grp_start, iop_chan, 0);
@@ -1698,7 +1699,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1698 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1699 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1699 if (sw_desc) { 1700 if (sw_desc) {
1700 grp_start = sw_desc->group_head; 1701 grp_start = sw_desc->group_head;
1701 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); 1702 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1702 async_tx_ack(&sw_desc->async_tx); 1703 async_tx_ack(&sw_desc->async_tx);
1703 iop_desc_init_null_xor(grp_start, 2, 0); 1704 iop_desc_init_null_xor(grp_start, 2, 0);
1704 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1705 iop_desc_set_byte_count(grp_start, iop_chan, 0);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index ddab94f51224..466ab10c1ff1 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -517,7 +517,7 @@ retry:
517 } 517 }
518 alloc_tail->group_head = alloc_start; 518 alloc_tail->group_head = alloc_start;
519 alloc_tail->async_tx.cookie = -EBUSY; 519 alloc_tail->async_tx.cookie = -EBUSY;
520 list_splice(&chain, &alloc_tail->async_tx.tx_list); 520 list_splice(&chain, &alloc_tail->tx_list);
521 mv_chan->last_used = last_used; 521 mv_chan->last_used = last_used;
522 mv_desc_clear_next_desc(alloc_start); 522 mv_desc_clear_next_desc(alloc_start);
523 mv_desc_clear_next_desc(alloc_tail); 523 mv_desc_clear_next_desc(alloc_tail);
@@ -565,14 +565,14 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
565 cookie = mv_desc_assign_cookie(mv_chan, sw_desc); 565 cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
566 566
567 if (list_empty(&mv_chan->chain)) 567 if (list_empty(&mv_chan->chain))
568 list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain); 568 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
569 else { 569 else {
570 new_hw_chain = 0; 570 new_hw_chain = 0;
571 571
572 old_chain_tail = list_entry(mv_chan->chain.prev, 572 old_chain_tail = list_entry(mv_chan->chain.prev,
573 struct mv_xor_desc_slot, 573 struct mv_xor_desc_slot,
574 chain_node); 574 chain_node);
575 list_splice_init(&grp_start->async_tx.tx_list, 575 list_splice_init(&grp_start->tx_list,
576 &old_chain_tail->chain_node); 576 &old_chain_tail->chain_node);
577 577
578 if (!mv_can_chain(grp_start)) 578 if (!mv_can_chain(grp_start))
@@ -632,6 +632,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
632 slot->async_tx.tx_submit = mv_xor_tx_submit; 632 slot->async_tx.tx_submit = mv_xor_tx_submit;
633 INIT_LIST_HEAD(&slot->chain_node); 633 INIT_LIST_HEAD(&slot->chain_node);
634 INIT_LIST_HEAD(&slot->slot_node); 634 INIT_LIST_HEAD(&slot->slot_node);
635 INIT_LIST_HEAD(&slot->tx_list);
635 hw_desc = (char *) mv_chan->device->dma_desc_pool; 636 hw_desc = (char *) mv_chan->device->dma_desc_pool;
636 slot->async_tx.phys = 637 slot->async_tx.phys =
637 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 638 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
@@ -1176,7 +1177,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1176 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) 1177 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1177 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; 1178 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1178 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1179 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1179 dma_dev->max_xor = 8; ; 1180 dma_dev->max_xor = 8;
1180 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; 1181 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1181 } 1182 }
1182 1183
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 06cafe1ef521..977b592e976b 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -126,9 +126,8 @@ struct mv_xor_chan {
126 * @idx: pool index 126 * @idx: pool index
127 * @unmap_src_cnt: number of xor sources 127 * @unmap_src_cnt: number of xor sources
128 * @unmap_len: transaction bytecount 128 * @unmap_len: transaction bytecount
129 * @tx_list: list of slots that make up a multi-descriptor transaction
129 * @async_tx: support for the async_tx api 130 * @async_tx: support for the async_tx api
130 * @group_list: list of slots that make up a multi-descriptor transaction
131 * for example transfer lengths larger than the supported hw max
132 * @xor_check_result: result of zero sum 131 * @xor_check_result: result of zero sum
133 * @crc32_result: result crc calculation 132 * @crc32_result: result crc calculation
134 */ 133 */
@@ -145,6 +144,7 @@ struct mv_xor_desc_slot {
145 u16 unmap_src_cnt; 144 u16 unmap_src_cnt;
146 u32 value; 145 u32 value;
147 size_t unmap_len; 146 size_t unmap_len;
147 struct list_head tx_list;
148 struct dma_async_tx_descriptor async_tx; 148 struct dma_async_tx_descriptor async_tx;
149 union { 149 union {
150 u32 *xor_check_result; 150 u32 *xor_check_result;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
new file mode 100644
index 000000000000..197c7bc37895
--- /dev/null
+++ b/drivers/dma/txx9dmac.c
@@ -0,0 +1,1356 @@
1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/scatterlist.h>
18#include "txx9dmac.h"
19
20static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
21{
22 return container_of(chan, struct txx9dmac_chan, chan);
23}
24
25static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
26{
27 return dc->ch_regs;
28}
29
30static struct txx9dmac_cregs32 __iomem *__dma_regs32(
31 const struct txx9dmac_chan *dc)
32{
33 return dc->ch_regs;
34}
35
36#define channel64_readq(dc, name) \
37 __raw_readq(&(__dma_regs(dc)->name))
38#define channel64_writeq(dc, name, val) \
39 __raw_writeq((val), &(__dma_regs(dc)->name))
40#define channel64_readl(dc, name) \
41 __raw_readl(&(__dma_regs(dc)->name))
42#define channel64_writel(dc, name, val) \
43 __raw_writel((val), &(__dma_regs(dc)->name))
44
45#define channel32_readl(dc, name) \
46 __raw_readl(&(__dma_regs32(dc)->name))
47#define channel32_writel(dc, name, val) \
48 __raw_writel((val), &(__dma_regs32(dc)->name))
49
50#define channel_readq(dc, name) channel64_readq(dc, name)
51#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
52#define channel_readl(dc, name) \
53 (is_dmac64(dc) ? \
54 channel64_readl(dc, name) : channel32_readl(dc, name))
55#define channel_writel(dc, name, val) \
56 (is_dmac64(dc) ? \
57 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
58
59static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
60{
61 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
62 return channel64_readq(dc, CHAR);
63 else
64 return channel64_readl(dc, CHAR);
65}
66
67static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
68{
69 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
70 channel64_writeq(dc, CHAR, val);
71 else
72 channel64_writel(dc, CHAR, val);
73}
74
75static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
76{
77#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
78 channel64_writel(dc, CHAR, 0);
79 channel64_writel(dc, __pad_CHAR, 0);
80#else
81 channel64_writeq(dc, CHAR, 0);
82#endif
83}
84
85static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
86{
87 if (is_dmac64(dc))
88 return channel64_read_CHAR(dc);
89 else
90 return channel32_readl(dc, CHAR);
91}
92
93static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
94{
95 if (is_dmac64(dc))
96 channel64_write_CHAR(dc, val);
97 else
98 channel32_writel(dc, CHAR, val);
99}
100
101static struct txx9dmac_regs __iomem *__txx9dmac_regs(
102 const struct txx9dmac_dev *ddev)
103{
104 return ddev->regs;
105}
106
107static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
108 const struct txx9dmac_dev *ddev)
109{
110 return ddev->regs;
111}
112
113#define dma64_readl(ddev, name) \
114 __raw_readl(&(__txx9dmac_regs(ddev)->name))
115#define dma64_writel(ddev, name, val) \
116 __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
117
118#define dma32_readl(ddev, name) \
119 __raw_readl(&(__txx9dmac_regs32(ddev)->name))
120#define dma32_writel(ddev, name, val) \
121 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
122
123#define dma_readl(ddev, name) \
124 (__is_dmac64(ddev) ? \
125 dma64_readl(ddev, name) : dma32_readl(ddev, name))
126#define dma_writel(ddev, name, val) \
127 (__is_dmac64(ddev) ? \
128 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
129
130static struct device *chan2dev(struct dma_chan *chan)
131{
132 return &chan->dev->device;
133}
134static struct device *chan2parent(struct dma_chan *chan)
135{
136 return chan->dev->device.parent;
137}
138
139static struct txx9dmac_desc *
140txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
141{
142 return container_of(txd, struct txx9dmac_desc, txd);
143}
144
145static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
146 const struct txx9dmac_desc *desc)
147{
148 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
149}
150
151static void desc_write_CHAR(const struct txx9dmac_chan *dc,
152 struct txx9dmac_desc *desc, dma_addr_t val)
153{
154 if (is_dmac64(dc))
155 desc->hwdesc.CHAR = val;
156 else
157 desc->hwdesc32.CHAR = val;
158}
159
160#define TXX9_DMA_MAX_COUNT 0x04000000
161
162#define TXX9_DMA_INITIAL_DESC_COUNT 64
163
164static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
165{
166 return list_entry(dc->active_list.next,
167 struct txx9dmac_desc, desc_node);
168}
169
170static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
171{
172 return list_entry(dc->active_list.prev,
173 struct txx9dmac_desc, desc_node);
174}
175
176static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
177{
178 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
179}
180
181static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
182{
183 if (!list_empty(&desc->tx_list))
184 desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
185 return desc;
186}
187
188static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
189
190static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
191 gfp_t flags)
192{
193 struct txx9dmac_dev *ddev = dc->ddev;
194 struct txx9dmac_desc *desc;
195
196 desc = kzalloc(sizeof(*desc), flags);
197 if (!desc)
198 return NULL;
199 INIT_LIST_HEAD(&desc->tx_list);
200 dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
201 desc->txd.tx_submit = txx9dmac_tx_submit;
202 /* txd.flags will be overwritten in prep funcs */
203 desc->txd.flags = DMA_CTRL_ACK;
204 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
205 ddev->descsize, DMA_TO_DEVICE);
206 return desc;
207}
208
209static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
210{
211 struct txx9dmac_desc *desc, *_desc;
212 struct txx9dmac_desc *ret = NULL;
213 unsigned int i = 0;
214
215 spin_lock_bh(&dc->lock);
216 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
217 if (async_tx_test_ack(&desc->txd)) {
218 list_del(&desc->desc_node);
219 ret = desc;
220 break;
221 }
222 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
223 i++;
224 }
225 spin_unlock_bh(&dc->lock);
226
227 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
228 i);
229 if (!ret) {
230 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
231 if (ret) {
232 spin_lock_bh(&dc->lock);
233 dc->descs_allocated++;
234 spin_unlock_bh(&dc->lock);
235 } else
236 dev_err(chan2dev(&dc->chan),
237 "not enough descriptors available\n");
238 }
239 return ret;
240}
241
242static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
243 struct txx9dmac_desc *desc)
244{
245 struct txx9dmac_dev *ddev = dc->ddev;
246 struct txx9dmac_desc *child;
247
248 list_for_each_entry(child, &desc->tx_list, desc_node)
249 dma_sync_single_for_cpu(chan2parent(&dc->chan),
250 child->txd.phys, ddev->descsize,
251 DMA_TO_DEVICE);
252 dma_sync_single_for_cpu(chan2parent(&dc->chan),
253 desc->txd.phys, ddev->descsize,
254 DMA_TO_DEVICE);
255}
256
257/*
258 * Move a descriptor, including any children, to the free list.
259 * `desc' must not be on any lists.
260 */
261static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
262 struct txx9dmac_desc *desc)
263{
264 if (desc) {
265 struct txx9dmac_desc *child;
266
267 txx9dmac_sync_desc_for_cpu(dc, desc);
268
269 spin_lock_bh(&dc->lock);
270 list_for_each_entry(child, &desc->tx_list, desc_node)
271 dev_vdbg(chan2dev(&dc->chan),
272 "moving child desc %p to freelist\n",
273 child);
274 list_splice_init(&desc->tx_list, &dc->free_list);
275 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
276 desc);
277 list_add(&desc->desc_node, &dc->free_list);
278 spin_unlock_bh(&dc->lock);
279 }
280}
281
282/* Called with dc->lock held and bh disabled */
283static dma_cookie_t
284txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
285{
286 dma_cookie_t cookie = dc->chan.cookie;
287
288 if (++cookie < 0)
289 cookie = 1;
290
291 dc->chan.cookie = cookie;
292 desc->txd.cookie = cookie;
293
294 return cookie;
295}
296
297/*----------------------------------------------------------------------*/
298
299static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
300{
301 if (is_dmac64(dc))
302 dev_err(chan2dev(&dc->chan),
303 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
304 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
305 (u64)channel64_read_CHAR(dc),
306 channel64_readq(dc, SAR),
307 channel64_readq(dc, DAR),
308 channel64_readl(dc, CNTR),
309 channel64_readl(dc, SAIR),
310 channel64_readl(dc, DAIR),
311 channel64_readl(dc, CCR),
312 channel64_readl(dc, CSR));
313 else
314 dev_err(chan2dev(&dc->chan),
315 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
316 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
317 channel32_readl(dc, CHAR),
318 channel32_readl(dc, SAR),
319 channel32_readl(dc, DAR),
320 channel32_readl(dc, CNTR),
321 channel32_readl(dc, SAIR),
322 channel32_readl(dc, DAIR),
323 channel32_readl(dc, CCR),
324 channel32_readl(dc, CSR));
325}
326
327static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
328{
329 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
330 if (is_dmac64(dc)) {
331 channel64_clear_CHAR(dc);
332 channel_writeq(dc, SAR, 0);
333 channel_writeq(dc, DAR, 0);
334 } else {
335 channel_writel(dc, CHAR, 0);
336 channel_writel(dc, SAR, 0);
337 channel_writel(dc, DAR, 0);
338 }
339 channel_writel(dc, CNTR, 0);
340 channel_writel(dc, SAIR, 0);
341 channel_writel(dc, DAIR, 0);
342 channel_writel(dc, CCR, 0);
343 mmiowb();
344}
345
346/* Called with dc->lock held and bh disabled */
347static void txx9dmac_dostart(struct txx9dmac_chan *dc,
348 struct txx9dmac_desc *first)
349{
350 struct txx9dmac_slave *ds = dc->chan.private;
351 u32 sai, dai;
352
353 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
354 first->txd.cookie, first);
355 /* ASSERT: channel is idle */
356 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
357 dev_err(chan2dev(&dc->chan),
358 "BUG: Attempted to start non-idle channel\n");
359 txx9dmac_dump_regs(dc);
360 /* The tasklet will hopefully advance the queue... */
361 return;
362 }
363
364 if (is_dmac64(dc)) {
365 channel64_writel(dc, CNTR, 0);
366 channel64_writel(dc, CSR, 0xffffffff);
367 if (ds) {
368 if (ds->tx_reg) {
369 sai = ds->reg_width;
370 dai = 0;
371 } else {
372 sai = 0;
373 dai = ds->reg_width;
374 }
375 } else {
376 sai = 8;
377 dai = 8;
378 }
379 channel64_writel(dc, SAIR, sai);
380 channel64_writel(dc, DAIR, dai);
381 /* All 64-bit DMAC supports SMPCHN */
382 channel64_writel(dc, CCR, dc->ccr);
383 /* Writing a non zero value to CHAR will assert XFACT */
384 channel64_write_CHAR(dc, first->txd.phys);
385 } else {
386 channel32_writel(dc, CNTR, 0);
387 channel32_writel(dc, CSR, 0xffffffff);
388 if (ds) {
389 if (ds->tx_reg) {
390 sai = ds->reg_width;
391 dai = 0;
392 } else {
393 sai = 0;
394 dai = ds->reg_width;
395 }
396 } else {
397 sai = 4;
398 dai = 4;
399 }
400 channel32_writel(dc, SAIR, sai);
401 channel32_writel(dc, DAIR, dai);
402 if (txx9_dma_have_SMPCHN()) {
403 channel32_writel(dc, CCR, dc->ccr);
404 /* Writing a non zero value to CHAR will assert XFACT */
405 channel32_writel(dc, CHAR, first->txd.phys);
406 } else {
407 channel32_writel(dc, CHAR, first->txd.phys);
408 channel32_writel(dc, CCR, dc->ccr);
409 }
410 }
411}
412
413/*----------------------------------------------------------------------*/
414
415static void
416txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
417 struct txx9dmac_desc *desc)
418{
419 dma_async_tx_callback callback;
420 void *param;
421 struct dma_async_tx_descriptor *txd = &desc->txd;
422 struct txx9dmac_slave *ds = dc->chan.private;
423
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc);
426
427 dc->completed = txd->cookie;
428 callback = txd->callback;
429 param = txd->callback_param;
430
431 txx9dmac_sync_desc_for_cpu(dc, desc);
432 list_splice_init(&desc->tx_list, &dc->free_list);
433 list_move(&desc->desc_node, &dc->free_list);
434
435 if (!ds) {
436 dma_addr_t dmaaddr;
437 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
438 dmaaddr = is_dmac64(dc) ?
439 desc->hwdesc.DAR : desc->hwdesc32.DAR;
440 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
441 dma_unmap_single(chan2parent(&dc->chan),
442 dmaaddr, desc->len, DMA_FROM_DEVICE);
443 else
444 dma_unmap_page(chan2parent(&dc->chan),
445 dmaaddr, desc->len, DMA_FROM_DEVICE);
446 }
447 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
448 dmaaddr = is_dmac64(dc) ?
449 desc->hwdesc.SAR : desc->hwdesc32.SAR;
450 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
451 dma_unmap_single(chan2parent(&dc->chan),
452 dmaaddr, desc->len, DMA_TO_DEVICE);
453 else
454 dma_unmap_page(chan2parent(&dc->chan),
455 dmaaddr, desc->len, DMA_TO_DEVICE);
456 }
457 }
458
459 /*
460 * The API requires that no submissions are done from a
461 * callback, so we don't need to drop the lock here
462 */
463 if (callback)
464 callback(param);
465 dma_run_dependencies(txd);
466}
467
468static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
469{
470 struct txx9dmac_dev *ddev = dc->ddev;
471 struct txx9dmac_desc *desc;
472 struct txx9dmac_desc *prev = NULL;
473
474 BUG_ON(!list_empty(list));
475 do {
476 desc = txx9dmac_first_queued(dc);
477 if (prev) {
478 desc_write_CHAR(dc, prev, desc->txd.phys);
479 dma_sync_single_for_device(chan2parent(&dc->chan),
480 prev->txd.phys, ddev->descsize,
481 DMA_TO_DEVICE);
482 }
483 prev = txx9dmac_last_child(desc);
484 list_move_tail(&desc->desc_node, list);
485 /* Make chain-completion interrupt happen */
486 if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
487 !txx9dmac_chan_INTENT(dc))
488 break;
489 } while (!list_empty(&dc->queue));
490}
491
492static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
493{
494 struct txx9dmac_desc *desc, *_desc;
495 LIST_HEAD(list);
496
497 /*
498 * Submit queued descriptors ASAP, i.e. before we go through
499 * the completed ones.
500 */
501 list_splice_init(&dc->active_list, &list);
502 if (!list_empty(&dc->queue)) {
503 txx9dmac_dequeue(dc, &dc->active_list);
504 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
505 }
506
507 list_for_each_entry_safe(desc, _desc, &list, desc_node)
508 txx9dmac_descriptor_complete(dc, desc);
509}
510
511static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
512 struct txx9dmac_hwdesc *desc)
513{
514 if (is_dmac64(dc)) {
515#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
516 dev_crit(chan2dev(&dc->chan),
517 " desc: ch%#llx s%#llx d%#llx c%#x\n",
518 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
519#else
520 dev_crit(chan2dev(&dc->chan),
521 " desc: ch%#llx s%#llx d%#llx c%#x"
522 " si%#x di%#x cc%#x cs%#x\n",
523 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
524 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
525#endif
526 } else {
527 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
528#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
529 dev_crit(chan2dev(&dc->chan),
530 " desc: ch%#x s%#x d%#x c%#x\n",
531 d->CHAR, d->SAR, d->DAR, d->CNTR);
532#else
533 dev_crit(chan2dev(&dc->chan),
534 " desc: ch%#x s%#x d%#x c%#x"
535 " si%#x di%#x cc%#x cs%#x\n",
536 d->CHAR, d->SAR, d->DAR, d->CNTR,
537 d->SAIR, d->DAIR, d->CCR, d->CSR);
538#endif
539 }
540}
541
542static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
543{
544 struct txx9dmac_desc *bad_desc;
545 struct txx9dmac_desc *child;
546 u32 errors;
547
548 /*
549 * The descriptor currently at the head of the active list is
550 * borked. Since we don't have any way to report errors, we'll
551 * just have to scream loudly and try to carry on.
552 */
553 dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
554 txx9dmac_dump_regs(dc);
555
556 bad_desc = txx9dmac_first_active(dc);
557 list_del_init(&bad_desc->desc_node);
558
559 /* Clear all error flags and try to restart the controller */
560 errors = csr & (TXX9_DMA_CSR_ABCHC |
561 TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
562 TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
563 channel_writel(dc, CSR, errors);
564
565 if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
566 txx9dmac_dequeue(dc, &dc->active_list);
567 if (!list_empty(&dc->active_list))
568 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
569
570 dev_crit(chan2dev(&dc->chan),
571 "Bad descriptor submitted for DMA! (cookie: %d)\n",
572 bad_desc->txd.cookie);
573 txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
574 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
575 txx9dmac_dump_desc(dc, &child->hwdesc);
576 /* Pretend the descriptor completed successfully */
577 txx9dmac_descriptor_complete(dc, bad_desc);
578}
579
580static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
581{
582 dma_addr_t chain;
583 struct txx9dmac_desc *desc, *_desc;
584 struct txx9dmac_desc *child;
585 u32 csr;
586
587 if (is_dmac64(dc)) {
588 chain = channel64_read_CHAR(dc);
589 csr = channel64_readl(dc, CSR);
590 channel64_writel(dc, CSR, csr);
591 } else {
592 chain = channel32_readl(dc, CHAR);
593 csr = channel32_readl(dc, CSR);
594 channel32_writel(dc, CSR, csr);
595 }
596 /* For dynamic chain, we should look at XFACT instead of NCHNC */
597 if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
598 /* Everything we've submitted is done */
599 txx9dmac_complete_all(dc);
600 return;
601 }
602 if (!(csr & TXX9_DMA_CSR_CHNEN))
603 chain = 0; /* last descriptor of this chain */
604
605 dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
606 (u64)chain);
607
608 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
609 if (desc_read_CHAR(dc, desc) == chain) {
610 /* This one is currently in progress */
611 if (csr & TXX9_DMA_CSR_ABCHC)
612 goto scan_done;
613 return;
614 }
615
616 list_for_each_entry(child, &desc->tx_list, desc_node)
617 if (desc_read_CHAR(dc, child) == chain) {
618 /* Currently in progress */
619 if (csr & TXX9_DMA_CSR_ABCHC)
620 goto scan_done;
621 return;
622 }
623
624 /*
625 * No descriptors so far seem to be in progress, i.e.
626 * this one must be done.
627 */
628 txx9dmac_descriptor_complete(dc, desc);
629 }
630scan_done:
631 if (csr & TXX9_DMA_CSR_ABCHC) {
632 txx9dmac_handle_error(dc, csr);
633 return;
634 }
635
636 dev_err(chan2dev(&dc->chan),
637 "BUG: All descriptors done, but channel not idle!\n");
638
639 /* Try to continue after resetting the channel... */
640 txx9dmac_reset_chan(dc);
641
642 if (!list_empty(&dc->queue)) {
643 txx9dmac_dequeue(dc, &dc->active_list);
644 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
645 }
646}
647
648static void txx9dmac_chan_tasklet(unsigned long data)
649{
650 int irq;
651 u32 csr;
652 struct txx9dmac_chan *dc;
653
654 dc = (struct txx9dmac_chan *)data;
655 csr = channel_readl(dc, CSR);
656 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
657
658 spin_lock(&dc->lock);
659 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
660 TXX9_DMA_CSR_NTRNFC))
661 txx9dmac_scan_descriptors(dc);
662 spin_unlock(&dc->lock);
663 irq = dc->irq;
664
665 enable_irq(irq);
666}
667
668static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
669{
670 struct txx9dmac_chan *dc = dev_id;
671
672 dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
673 channel_readl(dc, CSR));
674
675 tasklet_schedule(&dc->tasklet);
676 /*
677 * Just disable the interrupts. We'll turn them back on in the
678 * softirq handler.
679 */
680 disable_irq_nosync(irq);
681
682 return IRQ_HANDLED;
683}
684
685static void txx9dmac_tasklet(unsigned long data)
686{
687 int irq;
688 u32 csr;
689 struct txx9dmac_chan *dc;
690
691 struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
692 u32 mcr;
693 int i;
694
695 mcr = dma_readl(ddev, MCR);
696 dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
697 for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
698 if ((mcr >> (24 + i)) & 0x11) {
699 dc = ddev->chan[i];
700 csr = channel_readl(dc, CSR);
701 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
702 csr);
703 spin_lock(&dc->lock);
704 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
705 TXX9_DMA_CSR_NTRNFC))
706 txx9dmac_scan_descriptors(dc);
707 spin_unlock(&dc->lock);
708 }
709 }
710 irq = ddev->irq;
711
712 enable_irq(irq);
713}
714
715static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
716{
717 struct txx9dmac_dev *ddev = dev_id;
718
719 dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
720 dma_readl(ddev, MCR));
721
722 tasklet_schedule(&ddev->tasklet);
723 /*
724 * Just disable the interrupts. We'll turn them back on in the
725 * softirq handler.
726 */
727 disable_irq_nosync(irq);
728
729 return IRQ_HANDLED;
730}
731
732/*----------------------------------------------------------------------*/
733
734static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
735{
736 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
737 struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
738 dma_cookie_t cookie;
739
740 spin_lock_bh(&dc->lock);
741 cookie = txx9dmac_assign_cookie(dc, desc);
742
743 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
744 desc->txd.cookie, desc);
745
746 list_add_tail(&desc->desc_node, &dc->queue);
747 spin_unlock_bh(&dc->lock);
748
749 return cookie;
750}
751
752static struct dma_async_tx_descriptor *
753txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
754 size_t len, unsigned long flags)
755{
756 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
757 struct txx9dmac_dev *ddev = dc->ddev;
758 struct txx9dmac_desc *desc;
759 struct txx9dmac_desc *first;
760 struct txx9dmac_desc *prev;
761 size_t xfer_count;
762 size_t offset;
763
764 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
765 (u64)dest, (u64)src, len, flags);
766
767 if (unlikely(!len)) {
768 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
769 return NULL;
770 }
771
772 prev = first = NULL;
773
774 for (offset = 0; offset < len; offset += xfer_count) {
775 xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
776 /*
777 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
778 * ERT-TX49H4-016 (slightly conservative)
779 */
780 if (__is_dmac64(ddev)) {
781 if (xfer_count > 0x100 &&
782 (xfer_count & 0xff) >= 0xfa &&
783 (xfer_count & 0xff) <= 0xff)
784 xfer_count -= 0x20;
785 } else {
786 if (xfer_count > 0x80 &&
787 (xfer_count & 0x7f) >= 0x7e &&
788 (xfer_count & 0x7f) <= 0x7f)
789 xfer_count -= 0x20;
790 }
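		/*
		 * Example: a 0x1fe-byte request on a 64-bit DMAC has
		 * 0x1fe > 0x100 and a low byte of 0xfe (within
		 * 0xfa..0xff), so xfer_count is trimmed to 0x1de and
		 * the remaining 0x20 bytes are carried by the next
		 * descriptor of the chain.
		 */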
791
792 desc = txx9dmac_desc_get(dc);
793 if (!desc) {
794 txx9dmac_desc_put(dc, first);
795 return NULL;
796 }
797
798 if (__is_dmac64(ddev)) {
799 desc->hwdesc.SAR = src + offset;
800 desc->hwdesc.DAR = dest + offset;
801 desc->hwdesc.CNTR = xfer_count;
802 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
803 dc->ccr | TXX9_DMA_CCR_XFACT);
804 } else {
805 desc->hwdesc32.SAR = src + offset;
806 desc->hwdesc32.DAR = dest + offset;
807 desc->hwdesc32.CNTR = xfer_count;
808 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
809 dc->ccr | TXX9_DMA_CCR_XFACT);
810 }
811
812 /*
813 * The descriptors on tx_list are not reachable from
814 * the dc->queue list or dc->active_list after a
815 * submit. If we put all descriptors on active_list,
 816		 * invoking the callbacks on completion would be more
817 * complex.
818 */
819 if (!first) {
820 first = desc;
821 } else {
822 desc_write_CHAR(dc, prev, desc->txd.phys);
823 dma_sync_single_for_device(chan2parent(&dc->chan),
824 prev->txd.phys, ddev->descsize,
825 DMA_TO_DEVICE);
826 list_add_tail(&desc->desc_node, &first->tx_list);
827 }
828 prev = desc;
829 }
830
831 /* Trigger interrupt after last block */
832 if (flags & DMA_PREP_INTERRUPT)
833 txx9dmac_desc_set_INTENT(ddev, prev);
834
835 desc_write_CHAR(dc, prev, 0);
836 dma_sync_single_for_device(chan2parent(&dc->chan),
837 prev->txd.phys, ddev->descsize,
838 DMA_TO_DEVICE);
839
840 first->txd.flags = flags;
841 first->len = len;
842
843 return &first->txd;
844}
845
846static struct dma_async_tx_descriptor *
847txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
848 unsigned int sg_len, enum dma_data_direction direction,
849 unsigned long flags)
850{
851 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
852 struct txx9dmac_dev *ddev = dc->ddev;
853 struct txx9dmac_slave *ds = chan->private;
854 struct txx9dmac_desc *prev;
855 struct txx9dmac_desc *first;
856 unsigned int i;
857 struct scatterlist *sg;
858
859 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
860
861 BUG_ON(!ds || !ds->reg_width);
862 if (ds->tx_reg)
863 BUG_ON(direction != DMA_TO_DEVICE);
864 else
865 BUG_ON(direction != DMA_FROM_DEVICE);
866 if (unlikely(!sg_len))
867 return NULL;
868
869 prev = first = NULL;
870
871 for_each_sg(sgl, sg, sg_len, i) {
872 struct txx9dmac_desc *desc;
873 dma_addr_t mem;
874 u32 sai, dai;
875
876 desc = txx9dmac_desc_get(dc);
877 if (!desc) {
878 txx9dmac_desc_put(dc, first);
879 return NULL;
880 }
881
882 mem = sg_dma_address(sg);
883
884 if (__is_dmac64(ddev)) {
885 if (direction == DMA_TO_DEVICE) {
886 desc->hwdesc.SAR = mem;
887 desc->hwdesc.DAR = ds->tx_reg;
888 } else {
889 desc->hwdesc.SAR = ds->rx_reg;
890 desc->hwdesc.DAR = mem;
891 }
892 desc->hwdesc.CNTR = sg_dma_len(sg);
893 } else {
894 if (direction == DMA_TO_DEVICE) {
895 desc->hwdesc32.SAR = mem;
896 desc->hwdesc32.DAR = ds->tx_reg;
897 } else {
898 desc->hwdesc32.SAR = ds->rx_reg;
899 desc->hwdesc32.DAR = mem;
900 }
901 desc->hwdesc32.CNTR = sg_dma_len(sg);
902 }
903 if (direction == DMA_TO_DEVICE) {
904 sai = ds->reg_width;
905 dai = 0;
906 } else {
907 sai = 0;
908 dai = ds->reg_width;
909 }
910 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
911 dc->ccr | TXX9_DMA_CCR_XFACT);
912
913 if (!first) {
914 first = desc;
915 } else {
916 desc_write_CHAR(dc, prev, desc->txd.phys);
917 dma_sync_single_for_device(chan2parent(&dc->chan),
918 prev->txd.phys,
919 ddev->descsize,
920 DMA_TO_DEVICE);
921 list_add_tail(&desc->desc_node, &first->tx_list);
922 }
923 prev = desc;
924 }
925
926 /* Trigger interrupt after last block */
927 if (flags & DMA_PREP_INTERRUPT)
928 txx9dmac_desc_set_INTENT(ddev, prev);
929
930 desc_write_CHAR(dc, prev, 0);
931 dma_sync_single_for_device(chan2parent(&dc->chan),
932 prev->txd.phys, ddev->descsize,
933 DMA_TO_DEVICE);
934
935 first->txd.flags = flags;
936 first->len = 0;
937
938 return &first->txd;
939}
940
941static void txx9dmac_terminate_all(struct dma_chan *chan)
942{
943 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
944 struct txx9dmac_desc *desc, *_desc;
945 LIST_HEAD(list);
946
947 dev_vdbg(chan2dev(chan), "terminate_all\n");
948 spin_lock_bh(&dc->lock);
949
950 txx9dmac_reset_chan(dc);
951
952 /* active_list entries will end up before queued entries */
953 list_splice_init(&dc->queue, &list);
954 list_splice_init(&dc->active_list, &list);
955
956 spin_unlock_bh(&dc->lock);
957
958 /* Flush all pending and queued descriptors */
959 list_for_each_entry_safe(desc, _desc, &list, desc_node)
960 txx9dmac_descriptor_complete(dc, desc);
961}
962
963static enum dma_status
964txx9dmac_is_tx_complete(struct dma_chan *chan,
965 dma_cookie_t cookie,
966 dma_cookie_t *done, dma_cookie_t *used)
967{
968 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
969 dma_cookie_t last_used;
970 dma_cookie_t last_complete;
971 int ret;
972
973 last_complete = dc->completed;
974 last_used = chan->cookie;
975
976 ret = dma_async_is_complete(cookie, last_complete, last_used);
977 if (ret != DMA_SUCCESS) {
978 spin_lock_bh(&dc->lock);
979 txx9dmac_scan_descriptors(dc);
980 spin_unlock_bh(&dc->lock);
981
982 last_complete = dc->completed;
983 last_used = chan->cookie;
984
985 ret = dma_async_is_complete(cookie, last_complete, last_used);
986 }
987
988 if (done)
989 *done = last_complete;
990 if (used)
991 *used = last_used;
992
993 return ret;
994}
995
996static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
997 struct txx9dmac_desc *prev)
998{
999 struct txx9dmac_dev *ddev = dc->ddev;
1000 struct txx9dmac_desc *desc;
1001 LIST_HEAD(list);
1002
1003 prev = txx9dmac_last_child(prev);
1004 txx9dmac_dequeue(dc, &list);
1005 desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
1006 desc_write_CHAR(dc, prev, desc->txd.phys);
1007 dma_sync_single_for_device(chan2parent(&dc->chan),
1008 prev->txd.phys, ddev->descsize,
1009 DMA_TO_DEVICE);
1010 mmiowb();
1011 if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
1012 channel_read_CHAR(dc) == prev->txd.phys)
1013 /* Restart chain DMA */
1014 channel_write_CHAR(dc, desc->txd.phys);
1015 list_splice_tail(&list, &dc->active_list);
1016}
1017
1018static void txx9dmac_issue_pending(struct dma_chan *chan)
1019{
1020 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1021
1022 spin_lock_bh(&dc->lock);
1023
1024 if (!list_empty(&dc->active_list))
1025 txx9dmac_scan_descriptors(dc);
1026 if (!list_empty(&dc->queue)) {
1027 if (list_empty(&dc->active_list)) {
1028 txx9dmac_dequeue(dc, &dc->active_list);
1029 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
1030 } else if (txx9_dma_have_SMPCHN()) {
1031 struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
1032
1033 if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
1034 txx9dmac_chan_INTENT(dc))
1035 txx9dmac_chain_dynamic(dc, prev);
1036 }
1037 }
1038
1039 spin_unlock_bh(&dc->lock);
1040}
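
/*
 * Illustrative client sketch (not part of this driver; the channel,
 * scatterlist, callback and context come from a hypothetical caller):
 * the prep routine builds the chain, tx_submit() above only puts it on
 * dc->queue, and nothing is started until device_issue_pending().
 */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents, dma_async_tx_callback done,
			    void *ctx)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	desc->callback = done;
	desc->callback_param = ctx;

	cookie = desc->tx_submit(desc);		/* queues the descriptor */
	if (dma_submit_error(cookie))
		return -EIO;

	chan->device->device_issue_pending(chan);	/* actually starts it */
	return 0;
}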
1041
1042static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1043{
1044 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1045 struct txx9dmac_slave *ds = chan->private;
1046 struct txx9dmac_desc *desc;
1047 int i;
1048
1049 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1050
1051 /* ASSERT: channel is idle */
1052 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
1053 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1054 return -EIO;
1055 }
1056
1057 dc->completed = chan->cookie = 1;
1058
1059 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1060 txx9dmac_chan_set_SMPCHN(dc);
1061 if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
1062 dc->ccr |= TXX9_DMA_CCR_INTENC;
1063 if (chan->device->device_prep_dma_memcpy) {
1064 if (ds)
1065 return -EINVAL;
1066 dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
1067 } else {
1068 if (!ds ||
1069 (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
1070 return -EINVAL;
1071 dc->ccr |= TXX9_DMA_CCR_EXTRQ |
1072 TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
1073 txx9dmac_chan_set_INTENT(dc);
1074 }
1075
1076 spin_lock_bh(&dc->lock);
1077 i = dc->descs_allocated;
1078 while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
1079 spin_unlock_bh(&dc->lock);
1080
1081 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
1082 if (!desc) {
1083 dev_info(chan2dev(chan),
1084 "only allocated %d descriptors\n", i);
1085 spin_lock_bh(&dc->lock);
1086 break;
1087 }
1088 txx9dmac_desc_put(dc, desc);
1089
1090 spin_lock_bh(&dc->lock);
1091 i = ++dc->descs_allocated;
1092 }
1093 spin_unlock_bh(&dc->lock);
1094
1095 dev_dbg(chan2dev(chan),
1096 "alloc_chan_resources allocated %d descriptors\n", i);
1097
1098 return i;
1099}
1100
1101static void txx9dmac_free_chan_resources(struct dma_chan *chan)
1102{
1103 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1104 struct txx9dmac_dev *ddev = dc->ddev;
1105 struct txx9dmac_desc *desc, *_desc;
1106 LIST_HEAD(list);
1107
1108 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
1109 dc->descs_allocated);
1110
1111 /* ASSERT: channel is idle */
1112 BUG_ON(!list_empty(&dc->active_list));
1113 BUG_ON(!list_empty(&dc->queue));
1114 BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
1115
1116 spin_lock_bh(&dc->lock);
1117 list_splice_init(&dc->free_list, &list);
1118 dc->descs_allocated = 0;
1119 spin_unlock_bh(&dc->lock);
1120
1121 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1122 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1123 dma_unmap_single(chan2parent(chan), desc->txd.phys,
1124 ddev->descsize, DMA_TO_DEVICE);
1125 kfree(desc);
1126 }
1127
1128 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
1129}
1130
1131/*----------------------------------------------------------------------*/
1132
1133static void txx9dmac_off(struct txx9dmac_dev *ddev)
1134{
1135 dma_writel(ddev, MCR, 0);
1136 mmiowb();
1137}
1138
1139static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1140{
1141 struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
1142 struct platform_device *dmac_dev = cpdata->dmac_dev;
1143 struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
1144 struct txx9dmac_chan *dc;
1145 int err;
1146 int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
1147 int irq;
1148
1149 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
1150 if (!dc)
1151 return -ENOMEM;
1152
1153 dc->dma.dev = &pdev->dev;
1154 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1155 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1156 dc->dma.device_terminate_all = txx9dmac_terminate_all;
1157 dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
1158 dc->dma.device_issue_pending = txx9dmac_issue_pending;
1159 if (pdata && pdata->memcpy_chan == ch) {
1160 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
1161 dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
1162 } else {
1163 dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
1164 dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
1165 dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
1166 }
1167
1168 INIT_LIST_HEAD(&dc->dma.channels);
1169 dc->ddev = platform_get_drvdata(dmac_dev);
1170 if (dc->ddev->irq < 0) {
1171 irq = platform_get_irq(pdev, 0);
1172 if (irq < 0)
1173 return irq;
1174 tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
1175 (unsigned long)dc);
1176 dc->irq = irq;
1177 err = devm_request_irq(&pdev->dev, dc->irq,
1178 txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
1179 if (err)
1180 return err;
1181 } else
1182 dc->irq = -1;
1183 dc->ddev->chan[ch] = dc;
1184 dc->chan.device = &dc->dma;
1185 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1186 dc->chan.cookie = dc->completed = 1;
1187
1188 if (is_dmac64(dc))
1189 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
1190 else
1191 dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
1192 spin_lock_init(&dc->lock);
1193
1194 INIT_LIST_HEAD(&dc->active_list);
1195 INIT_LIST_HEAD(&dc->queue);
1196 INIT_LIST_HEAD(&dc->free_list);
1197
1198 txx9dmac_reset_chan(dc);
1199
1200 platform_set_drvdata(pdev, dc);
1201
1202 err = dma_async_device_register(&dc->dma);
1203 if (err)
1204 return err;
1205 dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
1206 dc->dma.dev_id,
1207 dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
1208 dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
1209
1210 return 0;
1211}
1212
1213static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
1214{
1215 struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
1216
1217 dma_async_device_unregister(&dc->dma);
1218 if (dc->irq >= 0)
1219 tasklet_kill(&dc->tasklet);
1220 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
1221 return 0;
1222}
1223
1224static int __init txx9dmac_probe(struct platform_device *pdev)
1225{
1226 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1227 struct resource *io;
1228 struct txx9dmac_dev *ddev;
1229 u32 mcr;
1230 int err;
1231
1232 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1233 if (!io)
1234 return -EINVAL;
1235
1236 ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
1237 if (!ddev)
1238 return -ENOMEM;
1239
1240 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
1241 dev_name(&pdev->dev)))
1242 return -EBUSY;
1243
1244 ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
1245 if (!ddev->regs)
1246 return -ENOMEM;
1247 ddev->have_64bit_regs = pdata->have_64bit_regs;
1248 if (__is_dmac64(ddev))
1249 ddev->descsize = sizeof(struct txx9dmac_hwdesc);
1250 else
1251 ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
1252
1253 /* force dma off, just in case */
1254 txx9dmac_off(ddev);
1255
1256 ddev->irq = platform_get_irq(pdev, 0);
1257 if (ddev->irq >= 0) {
1258 tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
1259 (unsigned long)ddev);
1260 err = devm_request_irq(&pdev->dev, ddev->irq,
1261 txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
1262 if (err)
1263 return err;
1264 }
1265
1266 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1267 if (pdata && pdata->memcpy_chan >= 0)
1268 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1269 dma_writel(ddev, MCR, mcr);
1270
1271 platform_set_drvdata(pdev, ddev);
1272 return 0;
1273}
1274
1275static int __exit txx9dmac_remove(struct platform_device *pdev)
1276{
1277 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1278
1279 txx9dmac_off(ddev);
1280 if (ddev->irq >= 0)
1281 tasklet_kill(&ddev->tasklet);
1282 return 0;
1283}
1284
1285static void txx9dmac_shutdown(struct platform_device *pdev)
1286{
1287 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1288
1289 txx9dmac_off(ddev);
1290}
1291
1292static int txx9dmac_suspend_late(struct platform_device *pdev,
1293 pm_message_t mesg)
1294{
1295 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1296
1297 txx9dmac_off(ddev);
1298 return 0;
1299}
1300
1301static int txx9dmac_resume_early(struct platform_device *pdev)
1302{
1303 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1304 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1305 u32 mcr;
1306
1307 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1308 if (pdata && pdata->memcpy_chan >= 0)
1309 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1310 dma_writel(ddev, MCR, mcr);
1311 return 0;
1312
1313}
1314
1315static struct platform_driver txx9dmac_chan_driver = {
1316 .remove = __exit_p(txx9dmac_chan_remove),
1317 .driver = {
1318 .name = "txx9dmac-chan",
1319 },
1320};
1321
1322static struct platform_driver txx9dmac_driver = {
1323 .remove = __exit_p(txx9dmac_remove),
1324 .shutdown = txx9dmac_shutdown,
1325 .suspend_late = txx9dmac_suspend_late,
1326 .resume_early = txx9dmac_resume_early,
1327 .driver = {
1328 .name = "txx9dmac",
1329 },
1330};
1331
1332static int __init txx9dmac_init(void)
1333{
1334 int rc;
1335
1336 rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
1337 if (!rc) {
1338 rc = platform_driver_probe(&txx9dmac_chan_driver,
1339 txx9dmac_chan_probe);
1340 if (rc)
1341 platform_driver_unregister(&txx9dmac_driver);
1342 }
1343 return rc;
1344}
1345module_init(txx9dmac_init);
1346
1347static void __exit txx9dmac_exit(void)
1348{
1349 platform_driver_unregister(&txx9dmac_chan_driver);
1350 platform_driver_unregister(&txx9dmac_driver);
1351}
1352module_exit(txx9dmac_exit);
1353
1354MODULE_LICENSE("GPL");
1355MODULE_DESCRIPTION("TXx9 DMA Controller driver");
1356MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
new file mode 100644
index 000000000000..365d42366b9f
--- /dev/null
+++ b/drivers/dma/txx9dmac.h
@@ -0,0 +1,308 @@
1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef TXX9DMAC_H
11#define TXX9DMAC_H
12
13#include <linux/dmaengine.h>
14#include <asm/txx9/dmac.h>
15
16/*
17 * Design Notes:
18 *
 19 * This DMAC has four channels and one FIFO buffer. Each channel can
 20 * be configured for memory-memory or device-memory transfer, but only
 21 * one channel at a time can do alignment-free memory-memory transfer,
 22 * and that channel must occupy the FIFO buffer for the transfer to be
 23 * efficient.
 24 *
 25 * Instead of dynamically assigning the FIFO buffer to channels, I
 26 * chose to make one dedicated channel for memory-memory transfer.
 27 * The dedicated channel is public; the other channels are private and
 28 * used for slave transfer. Some devices in the SoC are wired to a
 29 * specific DMA channel.
30 */
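
/*
 * Illustrative client sketch (not part of this driver; the filter,
 * function names and the FIFO address below are made up): a
 * peripheral driver describes its FIFO with struct txx9dmac_slave and
 * hands it to this driver through chan->private from its
 * dma_request_channel() filter, which is what the slave prep and
 * alloc_chan_resources paths expect.
 */
static bool example_dma_filter(struct dma_chan *chan, void *param)
{
	/* optionally check that @chan is the channel wired to the device */
	chan->private = param;		/* struct txx9dmac_slave * */
	return true;
}

static struct dma_chan *example_request_tx_chan(void)
{
	/* static: the driver keeps using this beyond the call */
	static struct txx9dmac_slave ds = {
		.tx_reg		= 0x10000000,	/* hypothetical TX FIFO address */
		.reg_width	= 4,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_dma_filter, &ds);
}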
31
32#ifdef CONFIG_MACH_TX49XX
33static inline bool txx9_dma_have_SMPCHN(void)
34{
35 return true;
36}
37#define TXX9_DMA_USE_SIMPLE_CHAIN
38#else
39static inline bool txx9_dma_have_SMPCHN(void)
40{
41 return false;
42}
43#endif
44
45#ifdef __LITTLE_ENDIAN
46#ifdef CONFIG_MACH_TX49XX
47#define CCR_LE TXX9_DMA_CCR_LE
48#define MCR_LE 0
49#else
50#define CCR_LE 0
51#define MCR_LE TXX9_DMA_MCR_LE
52#endif
53#else
54#define CCR_LE 0
55#define MCR_LE 0
56#endif
57
58/*
 59 * Pad each 32-bit register out to a 64-bit register slot; byte order
 60 * decides which half of the slot the named field occupies.
61 */
62#ifdef __BIG_ENDIAN
63#define TXX9_DMA_REG32(name) u32 __pad_##name; u32 name
64#else
65#define TXX9_DMA_REG32(name) u32 name; u32 __pad_##name
66#endif
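
/*
 * For instance, TXX9_DMA_REG32(CNTR) expands to
 *	"u32 __pad_CNTR; u32 CNTR;" on big-endian kernels and to
 *	"u32 CNTR; u32 __pad_CNTR;" on little-endian kernels, so the
 * named 32-bit field sits at offset 4 or offset 0 of its 64-bit
 * register slot respectively.
 */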
67
68/* Hardware register definitions. */
69struct txx9dmac_cregs {
70#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
71 TXX9_DMA_REG32(CHAR); /* Chain Address Register */
72#else
73 u64 CHAR; /* Chain Address Register */
74#endif
75 u64 SAR; /* Source Address Register */
76 u64 DAR; /* Destination Address Register */
77 TXX9_DMA_REG32(CNTR); /* Count Register */
78 TXX9_DMA_REG32(SAIR); /* Source Address Increment Register */
79 TXX9_DMA_REG32(DAIR); /* Destination Address Increment Register */
80 TXX9_DMA_REG32(CCR); /* Channel Control Register */
81 TXX9_DMA_REG32(CSR); /* Channel Status Register */
82};
83struct txx9dmac_cregs32 {
84 u32 CHAR;
85 u32 SAR;
86 u32 DAR;
87 u32 CNTR;
88 u32 SAIR;
89 u32 DAIR;
90 u32 CCR;
91 u32 CSR;
92};
93
94struct txx9dmac_regs {
95 /* per-channel registers */
96 struct txx9dmac_cregs CHAN[TXX9_DMA_MAX_NR_CHANNELS];
97 u64 __pad[9];
98 u64 MFDR; /* Memory Fill Data Register */
99 TXX9_DMA_REG32(MCR); /* Master Control Register */
100};
101struct txx9dmac_regs32 {
102 struct txx9dmac_cregs32 CHAN[TXX9_DMA_MAX_NR_CHANNELS];
103 u32 __pad[9];
104 u32 MFDR;
105 u32 MCR;
106};
107
108/* bits for MCR */
109#define TXX9_DMA_MCR_EIS(ch) (0x10000000<<(ch))
110#define TXX9_DMA_MCR_DIS(ch) (0x01000000<<(ch))
111#define TXX9_DMA_MCR_RSFIF 0x00000080
112#define TXX9_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
113#define TXX9_DMA_MCR_LE 0x00000004
114#define TXX9_DMA_MCR_RPRT 0x00000002
115#define TXX9_DMA_MCR_MSTEN 0x00000001
116
117/* bits for CCRn */
118#define TXX9_DMA_CCR_IMMCHN 0x20000000
119#define TXX9_DMA_CCR_USEXFSZ 0x10000000
120#define TXX9_DMA_CCR_LE 0x08000000
121#define TXX9_DMA_CCR_DBINH 0x04000000
122#define TXX9_DMA_CCR_SBINH 0x02000000
123#define TXX9_DMA_CCR_CHRST 0x01000000
124#define TXX9_DMA_CCR_RVBYTE 0x00800000
125#define TXX9_DMA_CCR_ACKPOL 0x00400000
126#define TXX9_DMA_CCR_REQPL 0x00200000
127#define TXX9_DMA_CCR_EGREQ 0x00100000
128#define TXX9_DMA_CCR_CHDN 0x00080000
129#define TXX9_DMA_CCR_DNCTL 0x00060000
130#define TXX9_DMA_CCR_EXTRQ 0x00010000
131#define TXX9_DMA_CCR_INTRQD 0x0000e000
132#define TXX9_DMA_CCR_INTENE 0x00001000
133#define TXX9_DMA_CCR_INTENC 0x00000800
134#define TXX9_DMA_CCR_INTENT 0x00000400
135#define TXX9_DMA_CCR_CHNEN 0x00000200
136#define TXX9_DMA_CCR_XFACT 0x00000100
137#define TXX9_DMA_CCR_SMPCHN 0x00000020
138#define TXX9_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c)
139#define TXX9_DMA_CCR_XFSZ_1 TXX9_DMA_CCR_XFSZ(0)
140#define TXX9_DMA_CCR_XFSZ_2 TXX9_DMA_CCR_XFSZ(1)
141#define TXX9_DMA_CCR_XFSZ_4 TXX9_DMA_CCR_XFSZ(2)
142#define TXX9_DMA_CCR_XFSZ_8 TXX9_DMA_CCR_XFSZ(3)
143#define TXX9_DMA_CCR_XFSZ_X4 TXX9_DMA_CCR_XFSZ(4)
144#define TXX9_DMA_CCR_XFSZ_X8 TXX9_DMA_CCR_XFSZ(5)
145#define TXX9_DMA_CCR_XFSZ_X16 TXX9_DMA_CCR_XFSZ(6)
146#define TXX9_DMA_CCR_XFSZ_X32 TXX9_DMA_CCR_XFSZ(7)
147#define TXX9_DMA_CCR_MEMIO 0x00000002
148#define TXX9_DMA_CCR_SNGAD 0x00000001
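/*
 * The slave path in txx9dmac_alloc_chan_resources() programs this field
 * as TXX9_DMA_CCR_XFSZ(__ffs(reg_width)); for example a 4-byte-wide
 * device register gives __ffs(4) == 2, i.e. TXX9_DMA_CCR_XFSZ_4.
 */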
149
150/* bits for CSRn */
151#define TXX9_DMA_CSR_CHNEN 0x00000400
152#define TXX9_DMA_CSR_STLXFER 0x00000200
153#define TXX9_DMA_CSR_XFACT 0x00000100
154#define TXX9_DMA_CSR_ABCHC 0x00000080
155#define TXX9_DMA_CSR_NCHNC 0x00000040
156#define TXX9_DMA_CSR_NTRNFC 0x00000020
157#define TXX9_DMA_CSR_EXTDN 0x00000010
158#define TXX9_DMA_CSR_CFERR 0x00000008
159#define TXX9_DMA_CSR_CHERR 0x00000004
160#define TXX9_DMA_CSR_DESERR 0x00000002
161#define TXX9_DMA_CSR_SORERR 0x00000001
162
163struct txx9dmac_chan {
164 struct dma_chan chan;
165 struct dma_device dma;
166 struct txx9dmac_dev *ddev;
167 void __iomem *ch_regs;
168 struct tasklet_struct tasklet;
169 int irq;
170 u32 ccr;
171
172 spinlock_t lock;
173
174 /* these other elements are all protected by lock */
175 dma_cookie_t completed;
176 struct list_head active_list;
177 struct list_head queue;
178 struct list_head free_list;
179
180 unsigned int descs_allocated;
181};
182
183struct txx9dmac_dev {
184 void __iomem *regs;
185 struct tasklet_struct tasklet;
186 int irq;
187 struct txx9dmac_chan *chan[TXX9_DMA_MAX_NR_CHANNELS];
188 bool have_64bit_regs;
189 unsigned int descsize;
190};
191
192static inline bool __is_dmac64(const struct txx9dmac_dev *ddev)
193{
194 return ddev->have_64bit_regs;
195}
196
197static inline bool is_dmac64(const struct txx9dmac_chan *dc)
198{
199 return __is_dmac64(dc->ddev);
200}
201
202#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
 203/* Hardware descriptor definition (for simple chain). */
204struct txx9dmac_hwdesc {
205#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
206 TXX9_DMA_REG32(CHAR);
207#else
208 u64 CHAR;
209#endif
210 u64 SAR;
211 u64 DAR;
212 TXX9_DMA_REG32(CNTR);
213};
214struct txx9dmac_hwdesc32 {
215 u32 CHAR;
216 u32 SAR;
217 u32 DAR;
218 u32 CNTR;
219};
220#else
221#define txx9dmac_hwdesc txx9dmac_cregs
222#define txx9dmac_hwdesc32 txx9dmac_cregs32
223#endif
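
/*
 * With TXX9_DMA_USE_SIMPLE_CHAIN the chained hardware descriptors carry
 * only CHAR/SAR/DAR/CNTR, so txx9dmac_desc_set_nosimple() is a no-op
 * and SAIR/DAIR/CCR come from the channel registers.  Without it, each
 * descriptor is a full register image and txx9dmac_desc_set_nosimple()
 * fills in the extra fields.
 */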
224
225struct txx9dmac_desc {
226 /* FIRST values the hardware uses */
227 union {
228 struct txx9dmac_hwdesc hwdesc;
229 struct txx9dmac_hwdesc32 hwdesc32;
230 };
231
232 /* THEN values for driver housekeeping */
233 struct list_head desc_node ____cacheline_aligned;
234 struct list_head tx_list;
235 struct dma_async_tx_descriptor txd;
236 size_t len;
237};
238
239#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
240
241static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
242{
243 return (dc->ccr & TXX9_DMA_CCR_INTENT) != 0;
244}
245
246static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
247{
248 dc->ccr |= TXX9_DMA_CCR_INTENT;
249}
250
251static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
252 struct txx9dmac_desc *desc)
253{
254}
255
256static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
257{
258 dc->ccr |= TXX9_DMA_CCR_SMPCHN;
259}
260
261static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
262 struct txx9dmac_desc *desc,
263 u32 sair, u32 dair, u32 ccr)
264{
265}
266
267#else /* TXX9_DMA_USE_SIMPLE_CHAIN */
268
269static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
270{
271 return true;
272}
273
 274static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
275{
276}
277
278static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
279 struct txx9dmac_desc *desc)
280{
281 if (__is_dmac64(ddev))
282 desc->hwdesc.CCR |= TXX9_DMA_CCR_INTENT;
283 else
284 desc->hwdesc32.CCR |= TXX9_DMA_CCR_INTENT;
285}
286
287static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
288{
289}
290
291static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
292 struct txx9dmac_desc *desc,
293 u32 sai, u32 dai, u32 ccr)
294{
295 if (__is_dmac64(ddev)) {
296 desc->hwdesc.SAIR = sai;
297 desc->hwdesc.DAIR = dai;
298 desc->hwdesc.CCR = ccr;
299 } else {
300 desc->hwdesc32.SAIR = sai;
301 desc->hwdesc32.DAIR = dai;
302 desc->hwdesc32.CCR = ccr;
303 }
304}
305
306#endif /* TXX9_DMA_USE_SIMPLE_CHAIN */
307
308#endif /* TXX9DMAC_H */