author	Zhang Wei <wei.zhang@freescale.com>	2008-03-01 09:42:48 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2008-03-04 12:16:46 -0500
commit	173acc7ce8538f1f3040791dc622a92aadc12cf4 (patch)
tree	f408e415851cf3343af6077287984169958951ad /drivers/dma
parent	976dde010e513a9c7c3117a32b7b015f84b37430 (diff)
dmaengine: add driver for Freescale MPC85xx DMA controller
The driver implements the DMA engine API for the Freescale MPC85xx DMA
controller, which can be used by other devices on the silicon. The driver
supports the Basic mode of the Freescale MPC85xx DMA controller. Supported
MPC85xx processors include the MPC8540/60, MPC8555, MPC8548, MPC8641 and
so on. The MPC83xx parts (MPC8349, MPC8360) are also supported.

[kamalesh@linux.vnet.ibm.com: build fix]
[dan.j.williams@intel.com: merge mm fixes, rebase on async_tx-2.6.25]
Signed-off-by: Zhang Wei <wei.zhang@freescale.com>
Signed-off-by: Ebony Zhu <ebony.zhu@freescale.com>
Acked-by: Kumar Gala <galak@gate.crashing.org>
Cc: Shannon Nelson <shannon.nelson@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
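For context, a minimal sketch of how a client might drive this engine through
the generic dmaengine operations the patch wires up below (device_prep_dma_memcpy,
tx_submit, device_issue_pending, device_is_tx_complete). example_memcpy is a
hypothetical helper, not part of the patch; it mirrors the driver's own self test
and assumes the buffers are already DMA-mapped:

/* Hypothetical client sketch -- not part of this patch. */
static int example_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Build the link-descriptor chain for the copy */
	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
	if (!tx)
		return -ENOMEM;

	async_tx_ack(tx);		/* no dependent operations follow */
	cookie = tx->tx_submit(tx);	/* queue it on the channel */
	chan->device->device_issue_pending(chan);

	/* Busy-poll for completion; real users would sleep or set
	 * tx->callback instead.
	 */
	while (chan->device->device_is_tx_complete(chan, cookie, NULL, NULL)
			!= DMA_SUCCESS)
		cpu_relax();
	return 0;
}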
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig   |   19
-rw-r--r--  drivers/dma/Makefile  |    1
-rw-r--r--  drivers/dma/fsldma.c  | 1068
-rw-r--r--  drivers/dma/fsldma.h  |  189
4 files changed, 1276 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a703deffb795..27340a7b19dd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig DMADEVICES
 	bool "DMA Engine support"
-	depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+	depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
 	depends on !HIGHMEM64G
 	help
 	  DMA engines can do asynchronous data transfers without
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
+config FSL_DMA
+	bool "Freescale MPC85xx/MPC83xx DMA support"
+	depends on PPC
+	select DMA_ENGINE
+	---help---
+	  Enable support for the Freescale DMA engine. It currently
+	  supports the MPC8560/40, MPC8555, MPC8548 and MPC8641
+	  processors. The MPC8349 and MPC8360 are also supported.
+
+config FSL_DMA_SELFTEST
+	bool "Enable the self test for each DMA channel"
+	depends on FSL_DMA
+	default y
+	---help---
+	  Enable the self test for each DMA channel. A self test will be
+	  performed after the channel is probed to ensure that DMA
+	  transfers work correctly.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b152cd84e123..c8036d945902 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
 ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
new file mode 100644
index 000000000000..902e852571a8
--- /dev/null
+++ b/drivers/dma/fsldma.c
@@ -0,0 +1,1068 @@
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which
 *   also fits the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also added.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}

}

static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link to the new descriptor physical address and
	 * enable the End-of-segment interrupt for
	 * the last link descriptor.
	 * (the previous node's next link descriptor)
	 *
	 * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA
 * transfers data from the source address (SA), if the loop size is 4,
 * the DMA will read data from SA, SA + 1, SA + 2, SA + 3, then loop
 * back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), if the loop size is 4,
 * the DMA will write data to TA, TA + 1, TA + 2, TA + 3, then loop
 * back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external
 * signal DREQ#. The pause control size is how many bytes are allowed
 * to transfer before pausing the channel, after which a new assertion
 * of DREQ# resumes channel operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If the external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately. The DMA channel will wait for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for the DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	LIST_HEAD(tmp_list);

	/* The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

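	/* Build a chain of hardware link descriptors, splitting the copy
	 * into FSL_DMA_BCR_MAX_CNT-sized chunks, since the byte count
	 * register limits the transfer size of each descriptor.
	 */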
	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: it runs the
 * callback of each completed link descriptor and frees the descriptor
 * back to the channel's descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	fsl_dma_update_completed_cookie(fsl_chan);
	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	if (!dma_is_idle(fsl_chan))
		return;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start the transfer.
	 */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Find the first un-transferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%016llx\n",
				(u64)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

static void fsl_dma_dependency_added(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	fsl_chan_ld_cleanup(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	dma_addr_t stat;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
			"nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
			(u64)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		fsl_chan_ld_cleanup(fsl_chan);
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		fsl_chan_xfer_ld_queue(fsl_chan);
	}

	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

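	/* The DGSR packs one status byte per channel, channel 0 in the
	 * most significant byte, so the first set bit identifies the
	 * interrupting channel.
	 */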
	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
{
	if (fsl_chan)
		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
}

static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
{
	struct dma_chan *chan;
	int err = 0;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	u8 *src, *dest;
	int i;
	size_t test_size;
	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;

	test_size = 4096;

	src = kmalloc(test_size * 2, GFP_KERNEL);
	if (!src) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc memory for test!\n");
		err = -ENOMEM;
		goto out;
	}

	dest = src + test_size;

	for (i = 0; i < test_size; i++)
		src[i] = (u8) i;

	chan = &fsl_chan->common;

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto out;
	}

	/* TX 1 */
	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
				  DMA_FROM_DEVICE);
	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
	async_tx_ack(tx1);

	cookie = fsl_dma_tx_submit(tx1);
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto out;
	}

	/* Test free and re-alloc channel resources */
	fsl_dma_free_chan_resources(chan);

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* Continue to test
	 * TX 2
	 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
					test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
					test_size / 4, DMA_FROM_DEVICE);
	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx2);

	/* TX 3 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
					test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
					test_size / 4, DMA_FROM_DEVICE);
	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx3);

	/* Test submitting the prepared transactions out of order */
	cookie = fsl_dma_tx_submit(tx3);
	cookie = fsl_dma_tx_submit(tx2);

#ifdef FSL_DMA_CALLBACKTEST
	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
		tx3->callback = fsl_dma_callback_test;
		tx3->callback_param = fsl_chan;
	}
#endif
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto free_resources;
	}

	err = memcmp(src, dest, test_size);
	if (err) {
		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
				i++);
		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
				"error! src 0x%x, dest 0x%x\n",
				i, test_size, *(src + i), *(dest + i));
	}

free_resources:
	fsl_dma_free_chan_resources(chan);
out:
	kfree(src);
	return err;
}

static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	struct fsl_dma_device *fdev;
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	fdev = dev_get_drvdata(dev->dev.parent);
	BUG_ON(!fdev);

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(&dev->dev, "No free memory for allocating "
				"dma channels!\n");
		err = -ENOMEM;
		goto err;
	}

	/* get dma channel register base */
	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err;
	}

	new_fsl_chan->feature = *(u32 *)match->data;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature differs from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = &dev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

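	/* Channels live at offsets 0x100, 0x180, 0x200, ... from the
	 * controller base, each spanning 0x80 bytes, so bits 7..9 of
	 * (offset - 0x100) give the channel index.
	 */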
	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(&dev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear the CDAR register */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(&dev->dev, "DMA channel %s request_irq error "
				"with return %d\n", dev->node->full_name, err);
			goto err;
		}
	}

#ifdef CONFIG_FSL_DMA_SELFTEST
	err = fsl_dma_self_test(new_fsl_chan);
	if (err)
		goto err;
#endif

	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				match->compatible, new_fsl_chan->irq);

	return 0;
err:
	dma_halt(new_fsl_chan);
	iounmap(new_fsl_chan->reg_base);
	free_irq(new_fsl_chan->irq, new_fsl_chan);
	list_del(&new_fsl_chan->common.device_node);
	kfree(new_fsl_chan);
	return err;
}

const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;

static struct of_device_id of_fsl_dma_chan_ids[] = {
	{
		.compatible = "fsl,mpc8540-dma-channel",
		.data = (void *)&mpc8540_dma_ip_feature,
	},
	{
		.compatible = "fsl,mpc8349-dma-channel",
		.data = (void *)&mpc8349_dma_ip_feature,
	},
	{}
};

static struct of_platform_driver of_fsl_dma_chan_driver = {
	.name = "of-fsl-dma-channel",
	.match_table = of_fsl_dma_chan_ids,
	.probe = of_fsl_dma_chan_probe,
};

static __init int of_fsl_dma_chan_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_chan_driver);
}

static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	unsigned int irq;
	struct fsl_dma_device *fdev;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto err;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%08x...\n",
			match->compatible, fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_dependency_added = fsl_dma_dependency_added;
	fdev->common.dev = &dev->dev;

	irq = irq_of_parse_and_map(dev->node, 0);
	if (irq != NO_IRQ) {
		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);
	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
	kfree(fdev);
	return err;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,mpc8540-dma", },
	{ .compatible = "fsl,mpc8349-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "of-fsl-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
};

static __init int of_fsl_dma_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_chan_init);
subsys_initcall(of_fsl_dma_init);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
new file mode 100644
index 000000000000..ba78c42121ba
--- /dev/null
+++ b/drivers/dma/fsldma.h
@@ -0,0 +1,189 @@
/*
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#ifndef __DMA_FSLDMA_H
#define __DMA_FSLDMA_H

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

/* Define data structures needed by the Freescale
 * MPC8540 and MPC8349 DMA controllers.
 */
#define FSL_DMA_MR_CS		0x00000001
#define FSL_DMA_MR_CC		0x00000002
#define FSL_DMA_MR_CA		0x00000008
#define FSL_DMA_MR_EIE		0x00000040
#define FSL_DMA_MR_XFE		0x00000020
#define FSL_DMA_MR_EOLNIE	0x00000100
#define FSL_DMA_MR_EOLSIE	0x00000080
#define FSL_DMA_MR_EOSIE	0x00000200
#define FSL_DMA_MR_CDSM		0x00000010
#define FSL_DMA_MR_CTM		0x00000004
#define FSL_DMA_MR_EMP_EN	0x00200000
#define FSL_DMA_MR_EMS_EN	0x00040000
#define FSL_DMA_MR_DAHE		0x00002000
#define FSL_DMA_MR_SAHE		0x00001000

/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE	0x00000080

#define FSL_DMA_SR_CH		0x00000020
#define FSL_DMA_SR_CB		0x00000004
#define FSL_DMA_SR_TE		0x00000080
#define FSL_DMA_SR_EOSI		0x00000002
#define FSL_DMA_SR_EOLSI	0x00000001
#define FSL_DMA_SR_EOCDI	0x00000001
#define FSL_DMA_SR_EOLNI	0x00000008

#define FSL_DMA_SATR_SBPATMU			0x20000000
#define FSL_DMA_SATR_STRANSINT_RIO		0x00c00000
#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ	0x00050000
#define FSL_DMA_SATR_SREADTYPE_BP_IORH		0x00020000
#define FSL_DMA_SATR_SREADTYPE_BP_NREAD		0x00040000
#define FSL_DMA_SATR_SREADTYPE_BP_MREAD		0x00070000

#define FSL_DMA_DATR_DBPATMU			0x20000000
#define FSL_DMA_DATR_DTRANSINT_RIO		0x00c00000
#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE	0x00050000
#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH	0x00010000

#define FSL_DMA_EOL		((u64)0x1)
#define FSL_DMA_SNEN		((u64)0x10)
#define FSL_DMA_EOSIE		0x8
#define FSL_DMA_NLDA_MASK	(~(u64)0x1f)

#define FSL_DMA_BCR_MAX_CNT	0x03ffffffu

#define FSL_DMA_DGSR_TE		0x80
#define FSL_DMA_DGSR_CH		0x20
#define FSL_DMA_DGSR_PE		0x10
#define FSL_DMA_DGSR_EOLNI	0x08
#define FSL_DMA_DGSR_CB		0x04
#define FSL_DMA_DGSR_EOSI	0x02
#define FSL_DMA_DGSR_EOLSI	0x01

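/* Hardware link descriptor, laid out as the controller fetches it; the
 * 32-byte alignment below matches the dma_pool alignment requested in
 * fsldma.c.
 */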
struct fsl_dma_ld_hw {
	u64 __bitwise src_addr;
	u64 __bitwise dst_addr;
	u64 __bitwise next_ln_addr;
	u32 __bitwise count;
	u32 __bitwise reserve;
} __attribute__((aligned(32)));

struct fsl_desc_sw {
	struct fsl_dma_ld_hw hw;
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
	struct list_head *ld;
	void *priv;
} __attribute__((aligned(32)));

struct fsl_dma_chan_regs {
	u32 __bitwise mr;	/* 0x00 - Mode Register */
	u32 __bitwise sr;	/* 0x04 - Status Register */
	u64 __bitwise cdar;	/* 0x08 - Current Descriptor Address Register */
	u64 __bitwise sar;	/* 0x10 - Source Address Register */
	u64 __bitwise dar;	/* 0x18 - Destination Address Register */
	u32 __bitwise bcr;	/* 0x20 - Byte Count Register */
	u64 __bitwise ndar;	/* 0x24 - Next Descriptor Address Register */
};

struct fsl_dma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 4

struct fsl_dma_device {
	void __iomem *reg_base;	/* DGSR register base */
	struct resource reg;	/* Resource for register */
	struct device *dev;
	struct dma_device common;
	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
	u32 feature;		/* The same as DMA channels */
};

/* Define macros for fsl_dma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN	0x00000000
#define FSL_DMA_BIG_ENDIAN	0x00000001

#define FSL_DMA_IP_MASK		0x00000ff0
#define FSL_DMA_IP_85XX		0x00000010
#define FSL_DMA_IP_83XX		0x00000020

#define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
#define FSL_DMA_CHAN_START_EXT	0x00002000

struct fsl_dma_chan {
	struct fsl_dma_chan_regs __iomem *reg_base;
	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
	spinlock_t desc_lock;		/* Descriptor operation lock */
	struct list_head ld_queue;	/* Link descriptors queue */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* Channel device */
	struct resource reg;		/* Resource for register */
	int irq;			/* Channel IRQ */
	int id;				/* Raw id of this channel */
	struct tasklet_struct tasklet;
	u32 feature;

	void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size);
	void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
	void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
	void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
};

#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)

#ifndef __powerpc64__
static u64 in_be64(const u64 __iomem *addr)
{
	return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1));
}

static void out_be64(u64 __iomem *addr, u64 val)
{
	out_be32((u32 *)addr, val >> 32);
	out_be32((u32 *)addr + 1, (u32)val);
}

/* There are no asm instructions for 64-bit byte-reversed loads and stores */
static u64 in_le64(const u64 __iomem *addr)
{
	return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr));
}

static void out_le64(u64 __iomem *addr, u64 val)
{
	out_le32((u32 *)addr + 1, val >> 32);
	out_le32((u32 *)addr, (u32)val);
}
#endif

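/* Endian-aware register accessors: 'width' (32 or 64) is token-pasted onto
 * the in_/out_ and byte-swap helper names, and the channel's
 * FSL_DMA_BIG_ENDIAN feature bit selects the byte order at run time.
 */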
#define DMA_IN(fsl_chan, addr, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			in_be##width(addr) : in_le##width(addr))
#define DMA_OUT(fsl_chan, addr, val, width)				\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			out_be##width(addr, val) : out_le##width(addr, val))

#define DMA_TO_CPU(fsl_chan, d, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			be##width##_to_cpu(d) : le##width##_to_cpu(d))
#define CPU_TO_DMA(fsl_chan, c, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			cpu_to_be##width(c) : cpu_to_le##width(c))

#endif	/* __DMA_FSLDMA_H */