path: root/drivers/dma
author		Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>	2009-09-06 23:26:23 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:56:02 -0400
commit		d8902adcc1a9fd484c8cb5e575152e32192c1ff8 (patch)
tree		305109ce60db5ea9710dddce9db8a23f65ff4572 /drivers/dma
parent		9134d02bc0af4a8747d448d1f811ec5f8eb96df6 (diff)
dmaengine: sh: Add support for the SuperH DMA Engine driver
This supports all DMA channels, and it was tested on SH7722, SH7780, SH7785 and SH7763. This driver cannot be used together with the SH DMA API.

Signed-off-by: Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
Reviewed-by: Matt Fleming <matt@console-pimps.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	  8
-rw-r--r--	drivers/dma/Makefile	  1
-rw-r--r--	drivers/dma/shdma.c	786
-rw-r--r--	drivers/dma/shdma.h	 64
4 files changed, 859 insertions, 0 deletions
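
As background for the diff below, here is a minimal sketch of how a dmaengine client could drive the memcpy channels this driver registers. It is not part of the patch; it relies on the generic dmaengine client API of this kernel generation (dma_request_channel(), dma_async_memcpy_buf_to_buf(), dma_async_issue_pending(), dma_async_is_tx_complete()), and shdma_memcpy_test() is a hypothetical name used only for illustration.

/* Hypothetical dmaengine client -- illustration only, not part of this patch. */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int shdma_memcpy_test(void *dest, void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/*
	 * Ask the dmaengine core for any channel advertising DMA_MEMCPY;
	 * with CONFIG_SH_DMAE enabled such channels come from this driver.
	 */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Generic helper: maps both buffers, prepares and submits a descriptor. */
	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	/* Kick the transfer and poll; a real client would use a completion callback. */
	dma_async_issue_pending(chan);
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}
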
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fe1f3717b1ff..3230a780c3de 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -101,6 +101,14 @@ config TXX9_DMAC
 	  Support the TXx9 SoC internal DMA controller. This can be
 	  integrated in chips such as the Toshiba TX4927/38/39.
 
+config SH_DMAE
+	tristate "Renesas SuperH DMAC support"
+	depends on SUPERH && SH_DMA
+	depends on !SH_DMA_API
+	select DMA_ENGINE
+	help
+	  Enable support for the Renesas SuperH DMA controllers.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 8f115e93b4a1..eca71ba78ae9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_SH_DMAE) += shdma.o
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
new file mode 100644
index 000000000000..b3b065c4e5c1
--- /dev/null
+++ b/drivers/dma/shdma.c
@@ -0,0 +1,786 @@
/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
#define DESC_LAST	(-1)
#define DESC_COMP	(1)
#define DESC_NCOMP	(0)

#define NR_DESCS_PER_CHANNEL 32
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * The driver also selects 4-byte burst mode by default.
 * To change the mode, change the value of RS_DEFAULT
 * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32)).
 */
#define RS_DEFAULT	(RS_DUAL)
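/*
 * Illustration only (hypothetical override, not part of this patch):
 * following the note above, a 1-byte burst configuration would be
 *	#define RS_DEFAULT	(RS_DUAL & ~TS_32)
 */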

#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
	unsigned short dmaor = dmaor_read_reg(id);

	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(id);
	dmaor = (dmaor_read_reg(id) | DMAOR_INIT);

	dmaor_write_reg(id, dmaor);
	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_err("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	if (chcr & CHCR_DE) {
		if (!(chcr & CHCR_TE))
			return -EBUSY; /* working */
	}
	return 0; /* waiting */
}

static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
{
	sh_dmae_writel(sh_chan, hw.sar, SAR);
	sh_dmae_writel(sh_chan, hw.dar, DAR);
	sh_dmae_writel(sh_chan,
		(hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= (CHCR_DE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	int ret = dmae_is_idle(sh_chan);
	/* CHCR cannot be written while the channel is transferring */
	if (ret)
		return ret;

	sh_dmae_writel(sh_chan, val, CHCR);
	return 0;
}

#define DMARS1_ADDR	0x04
#define DMARS2_ADDR	0x08
#define DMARS_SHIFT	8
#define DMARS_CHAN_MSK	0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	u32 addr;
	int shift = 0;
	int ret = dmae_is_idle(sh_chan);
	if (ret)
		return ret;

	if (sh_chan->id & DMARS_CHAN_MSK)
		shift = DMARS_SHIFT;

	switch (sh_chan->id) {
	/* DMARS0 */
	case 0:
	case 1:
		addr = SH_DMARS_BASE;
		break;
	/* DMARS1 */
	case 2:
	case 3:
		addr = (SH_DMARS_BASE + DMARS1_ADDR);
		break;
	/* DMARS2 */
	case 4:
	case 5:
		addr = (SH_DMARS_BASE + DMARS2_ADDR);
		break;
	default:
		return -EINVAL;
	}

	ctrl_outw((val << shift) |
		(ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
		addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx);
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	/* If this is the only descriptor it is also the last one and keeps -EBUSY */
	if (desc->async_tx.cookie != -EBUSY)
		desc->async_tx.cookie = cookie;
	sh_chan->common.cookie = desc->async_tx.cookie;

	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc, *_desc, *ret = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return ret;
}

static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
{
	if (desc) {
		spin_lock_bh(&sh_chan->desc_lock);

		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
		list_add(&desc->node, &sh_chan->ld_free);

		spin_unlock_bh(&sh_chan->desc_lock);
	}
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->async_tx.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc->tx_list);
		sh_dmae_put_desc(sh_chan, desc);

		spin_lock_bh(&sh_chan->desc_lock);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dma_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&sh_chan->ld_queue));
	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct sh_desc *first = NULL, *prev = NULL, *new;
	size_t copy_size;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = sh_dmae_get_desc(sh_chan);
		if (!new) {
			dev_err(sh_chan->dev,
				"No free memory for link descriptor\n");
			goto err_get_desc;
		}

		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);

		new->hw.sar = dma_src;
		new->hw.dar = dma_dest;
		new->hw.tcr = copy_size;
		if (!first)
			first = new;

		new->mark = DESC_NCOMP;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy_size;
		dma_src += copy_size;
		dma_dest += copy_size;
		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY; /* Last desc */

	return &first->async_tx;

err_get_desc:
	sh_dmae_put_desc(sh_chan, first);
	return NULL;

}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc, *_desc;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* descriptor not yet transferred */
		if (desc->mark == DESC_NCOMP)
			break;

		/* transferred descriptor */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from ld_queue list */
		list_splice_init(&desc->tx_list, &sh_chan->ld_free);

		dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n",
				desc);

		list_move(&desc->node, &sh_chan->ld_free);
		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_bh(&sh_chan->desc_lock);
			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_bh(&sh_chan->desc_lock);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct list_head *ld_node;
	struct sh_dmae_regs hw;

	/* DMA work check */
	if (dmae_is_idle(sh_chan))
		return;

	/* Find the first untransferred descriptor */
	for (ld_node = sh_chan->ld_queue.next;
		(ld_node != &sh_chan->ld_queue)
		&& (to_sh_desc(ld_node)->mark == DESC_COMP);
		ld_node = ld_node->next)
		cpu_relax();

	if (ld_node != &sh_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		hw = to_sh_desc(ld_node)->hw;
		dmae_set_reg(sh_chan, hw);
		dmae_start(sh_chan);
	}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	sh_dmae_chan_ld_cleanup(sh_chan);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	if (last_complete == -EBUSY)
		last_complete = last_used;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	int err = 0;
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

	/* IRQ Multi */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		int cnt = 0;
		switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		case DMTE6_IRQ:
			cnt++;
#endif
		case DMTE0_IRQ:
			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
				disable_irq(irq);
				return IRQ_HANDLED;
			}
		default:
			return IRQ_NONE;
		}
	} else {
		/* reset dma controller */
		err = sh_dmae_rst(0);
		if (err)
			return err;
		if (shdev->pdata.mode & SHDMA_DMAOR1) {
			err = sh_dmae_rst(1);
			if (err)
				return err;
		}
		disable_irq(irq);
		return IRQ_HANDLED;
	}
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc, *_desc, *cur_desc = NULL;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	list_for_each_entry_safe(desc, _desc,
				&sh_chan->ld_queue, node) {
		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
			cur_desc = desc;
			break;
		}
	}

	if (cur_desc) {
		switch (cur_desc->async_tx.cookie) {
		case 0: /* other desc data */
			break;
		case -EBUSY: /* last desc */
			sh_chan->completed_cookie =
				cur_desc->async_tx.cookie;
			break;
		default: /* first desc ( 0 < ) */
			sh_chan->completed_cookie =
				cur_desc->async_tx.cookie - 1;
			break;
		}
		cur_desc->mark = DESC_COMP;
	}
	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan);
}

static unsigned int get_dmae_irq(unsigned int id)
{
	unsigned int irq = 0;
	if (id < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[id];
	return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
	int err;
	unsigned int irq = get_dmae_irq(id);
	unsigned long irqflags = IRQF_DISABLED;
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
			(unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
			&shdev->common.channels);
	shdev->common.chancnt++;

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
		if (irq >= DMTE6_IRQ)
			irq = DMTE6_IRQ;
		else
#endif
			irq = DMTE0_IRQ;
	}

	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			"sh-dmae%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt,
			irqflags, new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
				"with return %d\n", id, err);
		goto err_no_irq;
	}

	/* CHCR register control function */
	new_sh_chan->set_chcr = dmae_set_chcr;
	/* DMARS register control function */
	new_sh_chan->set_dmars = dmae_set_dmars;

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *shchan = shdev->chan[i];
			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
				free_irq(dmte_irq_map[i], shchan);

			list_del(&shchan->common.device_node);
			kfree(shchan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	int err = 0, cnt, ecnt;
	unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
	int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
		DMAE1_IRQ
#endif
	};
#endif
	struct sh_dmae_device *shdev;

	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		err = -ENOMEM;
		goto shdev_err;
	}

	/* get platform data */
	if (!pdev->dev.platform_data)
		goto shdev_err;

	/* platform data */
	memcpy(&shdev->pdata, pdev->dev.platform_data,
		sizeof(struct sh_dmae_pdata));

	/* reset dma controller */
	err = sh_dmae_rst(0);
	if (err)
		goto rst_err;

	/* SH7780/85/23 has DMAOR1 */
	if (shdev->pdata.mode & SHDMA_DMAOR1) {
		err = sh_dmae_rst(1);
		if (err)
			goto rst_err;
	}

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
	shdev->common.dev = &pdev->dev;

#if defined(CONFIG_CPU_SH4)
	/* Non Mix IRQ mode SH7722/SH7730 etc... */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
		eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		eirq[1] = DMTE6_IRQ;
#endif
	}

	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
		err = request_irq(eirq[ecnt], sh_dmae_err,
			irqflags, "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev, "DMA device request_irq "
				"error (irq %d) with return %d\n",
				eirq[ecnt], err);
			goto eirq_err;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/* Create DMA Channel */
	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
		err = sh_dmae_chan_probe(shdev, cnt);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
	for (ecnt-- ; ecnt >= 0; ecnt--)
		free_irq(eirq[ecnt], shdev);

rst_err:
	kfree(shdev);

shdev_err:
	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&shdev->common);

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
		free_irq(DMTE6_IRQ, shdev);
#endif
	}

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
		free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
		free_irq(DMAE1_IRQ, shdev);
#endif
	}
	kfree(shdev);

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(0);
	if (shdev->pdata.mode & SHDMA_DMAOR1)
		sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.name = "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
new file mode 100644
index 000000000000..2b4bc15a2c0a
--- /dev/null
+++ b/drivers/dma/shdma.h
@@ -0,0 +1,64 @@
/*
 * Renesas SuperH DMA Engine support
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

#define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */

struct sh_dmae_regs {
	u32 sar; /* SAR / source address */
	u32 dar; /* DAR / destination address */
	u32 tcr; /* TCR / transfer count */
};

struct sh_desc {
	struct list_head tx_list;
	struct sh_dmae_regs hw;
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
	int mark;
};

struct sh_dmae_chan {
	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
	spinlock_t desc_lock;		/* Descriptor operation lock */
	struct list_head ld_queue;	/* Link descriptors queue */
	struct list_head ld_free;	/* Link descriptors free */
	struct dma_chan common;		/* DMA common channel */
	struct device *dev;		/* Channel device */
	struct tasklet_struct tasklet;	/* Tasklet */
	int descs_allocated;		/* desc count */
	int id;				/* Raw id of this channel */
	char dev_id[16];		/* unique name per DMAC of channel */

	/* Set chcr */
	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
	/* Set DMA resource */
	int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
};

struct sh_dmae_device {
	struct dma_device common;
	struct sh_dmae_chan *chan[MAX_DMA_CHANNELS];
	struct sh_dmae_pdata pdata;
};

#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)

#endif /* __DMA_SHDMA_H */