author     Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2008-07-08 14:59:42 -0400
committer  Dan Williams <dan.j.williams@intel.com>             2008-07-08 14:59:42 -0400
commit     3bfb1d20b547a5071d01344581eac5846ea84491 (patch)
tree       3cdbd3b5d59c93f257573cc894db2a000698f02b
parent     dc0ee6435cb92ccc81b14ff28d163fecc5a7f120 (diff)
dmaengine: Driver for the Synopsys DesignWare DMA controller
This adds a driver for the Synopsys DesignWare DMA controller (aka
DMACA on AVR32 systems.)  This DMA controller can be found integrated
on the AT32AP7000 chip and is primarily meant for peripheral DMA
transfer, but can also be used for memory-to-memory transfers.

This patch is based on a driver from David Brownell which was based on
an older version of the DMA Engine framework.  It also implements the
proposed extensions to the DMA Engine API for slave DMA operations.

The dmatest client shows no problems, but there may still be room for
improvement performance-wise.  DMA slave transfer performance is
definitely "good enough"; reading 100 MiB from an SD card running at
~20 MHz yields ~7.2 MiB/s average transfer rate.

Full documentation for this controller can be found in the Synopsys
DW AHB DMAC Databook:

http://www.synopsys.com/designware/docs/iip/DW_ahb_dmac/latest/doc/dw_ahb_dmac_db.pdf

The controller has lots of implementation options, so it's usually a
good idea to check the data sheet of the chip it's integrated on as
well.  The AT32AP7000 data sheet can be found here:

http://www.atmel.com/dyn/products/datasheets.asp?family_id=682

Changes since v4:
  * Use client_count instead of dma_chan_is_in_use()
  * Add missing include
  * Unmap buffers unless client told us not to

Changes since v3:
  * Update to latest DMA engine and DMA slave APIs
  * Embed the hw descriptor into the sw descriptor
  * Clean up and update MODULE_DESCRIPTION, copyright date, etc.

Changes since v2:
  * Dequeue all pending transfers in terminate_all()
  * Rename dw_dmac.h -> dw_dmac_regs.h
  * Define and use controller-specific dma_slave data
  * Fix up a few outdated comments
  * Define hardware registers as structs (doesn't generate better
    code, unfortunately, but it looks nicer.)
  * Get number of channels from platform_data instead of hardcoding it
    based on CONFIG_WHATEVER_CPU.
  * Give slave clients exclusive access to the channel

Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
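To illustrate the controller-specific dma_slave data mentioned in the changelog, here is a minimal board-code sketch that describes the MMC controller as a slave of dw_dmac0. It is not part of the patch: the peripheral register addresses and the reg_width value are placeholder assumptions, and it presumes it lives in the same board file that defines dw_dmac0_device. Only struct dw_dma_slave, the DWC_CFGH_* macros and the DMAC_MCI_* handshake IDs come from this patch.

/*
 * Illustrative sketch only -- not part of this patch.
 * Describe the MMC controller as a dw_dmac slave.  The data register
 * addresses below are hypothetical placeholders; a real board file
 * would use the MMC block's actual TDR/RDR addresses.
 */
#include <linux/dw_dmac.h>
#include <asm/arch/at32ap700x.h>

static struct dw_dma_slave mci0_dma_slave = {
	.slave = {
		/* must match the controller device registered by this patch */
		.dma_dev	= &dw_dmac0_device.dev,
		.tx_reg		= 0xfff02400 + 0x34,	/* hypothetical MMC TDR */
		.rx_reg		= 0xfff02400 + 0x30,	/* hypothetical MMC RDR */
		/* assumed: 2 == 32-bit accesses, as interpreted by dwc_prep_slave_sg() */
		.reg_width	= 2,
	},
	/* route the hardware handshake interfaces defined in at32ap700x.h */
	.cfg_hi	= DWC_CFGH_SRC_PER(DMAC_MCI_RX) | DWC_CFGH_DST_PER(DMAC_MCI_TX),
	.cfg_lo	= 0,
};

A slave client would then pass this structure to the DMA Engine core through the dma_client it registers, so that dwc_alloc_chan_resources() can program cfg_hi/cfg_lo and dwc_prep_slave_sg() can use tx_reg, rx_reg and reg_width.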
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c            27
-rw-r--r--  drivers/dma/Kconfig                             9
-rw-r--r--  drivers/dma/Makefile                            1
-rw-r--r--  drivers/dma/dw_dmac.c                        1122
-rw-r--r--  drivers/dma/dw_dmac_regs.h                    225
-rw-r--r--  include/asm-avr32/arch-at32ap/at32ap700x.h     16
-rw-r--r--  include/linux/dw_dmac.h                        62
7 files changed, 1449 insertions(+), 13 deletions(-)
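The memory-to-memory path advertised above (the DMA_MEMCPY capability) is driven through the generic DMA Engine descriptor hooks this driver fills in. A rough sketch, assuming the caller has already obtained a channel on this controller through the dmaengine client API and holds bus addresses for both buffers:

#include <linux/dmaengine.h>

/*
 * Rough sketch -- not part of this patch.  Submit one memcpy on a
 * dw_dmac channel; 'dest' and 'src' must already be DMA (bus)
 * addresses, e.g. from dma_map_single().
 */
static dma_cookie_t dw_dmac_copy(struct dma_chan *chan,
				 dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* builds the LLI chain; ends up in dwc_prep_dma_memcpy() */
	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* hands the descriptor to dwc_tx_submit() and assigns a cookie */
	cookie = tx->tx_submit(tx);

	/* kicks dwc_issue_pending() so queued descriptors get started */
	chan->device->device_issue_pending(chan);

	return cookie;
}

Completion can then be polled via chan->device->device_is_tx_complete() (dwc_is_tx_complete() here) or observed through the callback requested with DMA_PREP_INTERRUPT.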
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 0f24b4f85c17..892e27e0d583 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -7,6 +7,7 @@
  */
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dw_dmac.h>
 #include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
@@ -599,6 +600,17 @@ static void __init genclk_init_parent(struct clk *clk)
 	clk->parent = parent;
 }
 
+static struct dw_dma_platform_data dw_dmac0_data = {
+	.nr_channels	= 3,
+};
+
+static struct resource dw_dmac0_resource[] = {
+	PBMEM(0xff200000),
+	IRQ(2),
+};
+DEFINE_DEV_DATA(dw_dmac, 0);
+DEV_CLK(hclk, dw_dmac0, hsb, 10);
+
 /* --------------------------------------------------------------------
  *  System peripherals
  * -------------------------------------------------------------------- */
@@ -705,17 +717,6 @@ static struct clk pico_clk = {
 	.users		= 1,
 };
 
-static struct resource dmaca0_resource[] = {
-	{
-		.start	= 0xff200000,
-		.end	= 0xff20ffff,
-		.flags	= IORESOURCE_MEM,
-	},
-	IRQ(2),
-};
-DEFINE_DEV(dmaca, 0);
-DEV_CLK(hclk, dmaca0, hsb, 10);
-
 /* --------------------------------------------------------------------
  * HMATRIX
  * -------------------------------------------------------------------- */
@@ -828,7 +829,7 @@ void __init at32_add_system_devices(void)
 	platform_device_register(&at32_eic0_device);
 	platform_device_register(&smc0_device);
 	platform_device_register(&pdc_device);
-	platform_device_register(&dmaca0_device);
+	platform_device_register(&dw_dmac0_device);
 
 	platform_device_register(&at32_tcb0_device);
 	platform_device_register(&at32_tcb1_device);
@@ -1891,7 +1892,7 @@ struct clk *at32_clock_list[] = {
 	&smc0_mck,
 	&pdc_hclk,
 	&pdc_pclk,
-	&dmaca0_hclk,
+	&dw_dmac0_hclk,
 	&pico_clk,
 	&pio0_mck,
 	&pio1_mck,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 4b6bd3d099cf..cd303901eb5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -38,6 +38,15 @@ config INTEL_IOP_ADMA
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
+config DW_DMAC
+	tristate "Synopsys DesignWare AHB DMA support"
+	depends on AVR32
+	select DMA_ENGINE
+	default y if CPU_AT32AP7000
+	help
+	  Support the Synopsys DesignWare AHB DMA controller.  This
+	  can be integrated in chips such as the Atmel AT32ap7000.
+
 config FSL_DMA
 	bool "Freescale MPC85xx/MPC83xx DMA support"
 	depends on PPC
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 181e3646fbfe..14f59527d4f6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,3 +6,4 @@ ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 000000000000..94df91771243
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems.)
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/dmaengine.h>
14#include <linux/dma-mapping.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#include "dw_dmac_regs.h"
24
25/*
26 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
27 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
28 * of which use ARM any more). See the "Databook" from Synopsys for
29 * information beyond what licensees probably provide.
30 *
31 * The driver has currently been tested only with the Atmel AT32AP7000,
32 * which does not support descriptor writeback.
33 */
34
35/* NOTE: DMS+SMS is system-specific. We should get this information
36 * from the platform code somehow.
37 */
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
39 | DWC_CTLL_SRC_MSIZE(0) \
40 | DWC_CTLL_DMS(0) \
41 | DWC_CTLL_SMS(1) \
42 | DWC_CTLL_LLP_D_EN \
43 | DWC_CTLL_LLP_S_EN)
44
45/*
46 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor.
51 *
52 * This parameter is also system-specific.
53 */
54#define DWC_MAX_COUNT 2048U
55
56/*
57 * Number of descriptors to allocate for each channel. This should be
58 * made configurable somehow; preferably, the clients (at least the
59 * ones using slave transfers) should be able to give us a hint.
60 */
61#define NR_DESCS_PER_CHANNEL 64
62
63/*----------------------------------------------------------------------*/
64
65/*
66 * Because we're not relying on writeback from the controller (it may not
67 * even be configured into the core!) we don't need to use dma_pool. These
68 * descriptors -- and associated data -- are cacheable. We do need to make
69 * sure their dcache entries are written back before handing them off to
70 * the controller, though.
71 */
72
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
76}
77
78static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
79{
80 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
81}
82
83static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
84{
85 struct dw_desc *desc, *_desc;
86 struct dw_desc *ret = NULL;
87 unsigned int i = 0;
88
89 spin_lock_bh(&dwc->lock);
90 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
91 if (async_tx_test_ack(&desc->txd)) {
92 list_del(&desc->desc_node);
93 ret = desc;
94 break;
95 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
97 i++;
98 }
99 spin_unlock_bh(&dwc->lock);
100
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
102
103 return ret;
104}
105
106static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
107{
108 struct dw_desc *child;
109
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent,
112 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent,
115 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE);
117}
118
119/*
120 * Move a descriptor, including any children, to the free list.
121 * `desc' must not be on any lists.
122 */
123static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
125 if (desc) {
126 struct dw_desc *child;
127
128 dwc_sync_desc_for_cpu(dwc, desc);
129
130 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev,
133 "moving child desc %p to freelist\n",
134 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock);
139 }
140}
141
142/* Called with dwc->lock held and bh disabled */
143static dma_cookie_t
144dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
145{
146 dma_cookie_t cookie = dwc->chan.cookie;
147
148 if (++cookie < 0)
149 cookie = 1;
150
151 dwc->chan.cookie = cookie;
152 desc->txd.cookie = cookie;
153
154 return cookie;
155}
156
157/*----------------------------------------------------------------------*/
158
159/* Called with dwc->lock held and bh disabled */
160static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
161{
162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
163
164 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev,
167 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev,
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR),
172 channel_readl(dwc, LLP),
173 channel_readl(dwc, CTL_HI),
174 channel_readl(dwc, CTL_LO));
175
176 /* The tasklet will hopefully advance the queue... */
177 return;
178 }
179
180 channel_writel(dwc, LLP, first->txd.phys);
181 channel_writel(dwc, CTL_LO,
182 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
183 channel_writel(dwc, CTL_HI, 0);
184 channel_set_bit(dw, CH_EN, dwc->mask);
185}
186
187/*----------------------------------------------------------------------*/
188
189static void
190dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
191{
192 dma_async_tx_callback callback;
193 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd;
195
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
197
198 dwc->completed = txd->cookie;
199 callback = txd->callback;
200 param = txd->callback_param;
201
202 dwc_sync_desc_for_cpu(dwc, desc);
203 list_splice_init(&txd->tx_list, &dwc->free_list);
204 list_move(&desc->desc_node, &dwc->free_list);
205
206 /*
207 * We use dma_unmap_page() regardless of how the buffers were
208 * mapped before they were submitted...
209 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
212 DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
215 DMA_TO_DEVICE);
216
217 /*
218 * The API requires that no submissions are done from a
219 * callback, so we don't need to drop the lock here
220 */
221 if (callback)
222 callback(param);
223}
224
225static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
226{
227 struct dw_desc *desc, *_desc;
228 LIST_HEAD(list);
229
230 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev,
232 "BUG: XFER bit set, but channel not idle!\n");
233
234 /* Try to continue after resetting the channel... */
235 channel_clear_bit(dw, CH_EN, dwc->mask);
236 while (dma_readl(dw, CH_EN) & dwc->mask)
237 cpu_relax();
238 }
239
240 /*
241 * Submit queued descriptors ASAP, i.e. before we go through
242 * the completed ones.
243 */
244 if (!list_empty(&dwc->queue))
245 dwc_dostart(dwc, dwc_first_queued(dwc));
246 list_splice_init(&dwc->active_list, &list);
247 list_splice_init(&dwc->queue, &dwc->active_list);
248
249 list_for_each_entry_safe(desc, _desc, &list, desc_node)
250 dwc_descriptor_complete(dwc, desc);
251}
252
253static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
254{
255 dma_addr_t llp;
256 struct dw_desc *desc, *_desc;
257 struct dw_desc *child;
258 u32 status_xfer;
259
260 /*
261 * Clear block interrupt flag before scanning so that we don't
262 * miss any, and read LLP before RAW_XFER to ensure it is
263 * valid if we decide to scan the list.
264 */
265 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
266 llp = channel_readl(dwc, LLP);
267 status_xfer = dma_readl(dw, RAW.XFER);
268
269 if (status_xfer & dwc->mask) {
270 /* Everything we've submitted is done */
271 dma_writel(dw, CLEAR.XFER, dwc->mask);
272 dwc_complete_all(dw, dwc);
273 return;
274 }
275
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
277
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp)
280 /* This one is currently in progress */
281 return;
282
283 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
284 if (child->lli.llp == llp)
285 /* Currently in progress */
286 return;
287
288 /*
289 * No descriptors so far seem to be in progress, i.e.
290 * this one must be done.
291 */
292 dwc_descriptor_complete(dwc, desc);
293 }
294
295 dev_err(&dwc->chan.dev,
296 "BUG: All descriptors done, but channel not idle!\n");
297
298 /* Try to continue after resetting the channel... */
299 channel_clear_bit(dw, CH_EN, dwc->mask);
300 while (dma_readl(dw, CH_EN) & dwc->mask)
301 cpu_relax();
302
303 if (!list_empty(&dwc->queue)) {
304 dwc_dostart(dwc, dwc_first_queued(dwc));
305 list_splice_init(&dwc->queue, &dwc->active_list);
306 }
307}
308
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{
311 dev_printk(KERN_CRIT, &dwc->chan.dev,
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo);
315}
316
317static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
318{
319 struct dw_desc *bad_desc;
320 struct dw_desc *child;
321
322 dwc_scan_descriptors(dw, dwc);
323
324 /*
325 * The descriptor currently at the head of the active list is
326 * borked. Since we don't have any way to report errors, we'll
327 * just have to scream loudly and try to carry on.
328 */
329 bad_desc = dwc_first_active(dwc);
330 list_del_init(&bad_desc->desc_node);
331 list_splice_init(&dwc->queue, dwc->active_list.prev);
332
333 /* Clear the error flag and try to restart the controller */
334 dma_writel(dw, CLEAR.ERROR, dwc->mask);
335 if (!list_empty(&dwc->active_list))
336 dwc_dostart(dwc, dwc_first_active(dwc));
337
338 /*
339 * KERN_CRITICAL may seem harsh, but since this only happens
340 * when someone submits a bad physical address in a
341 * descriptor, we should consider ourselves lucky that the
342 * controller flagged an error instead of scribbling over
343 * random memory locations.
344 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev,
346 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev,
348 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
351 dwc_dump_lli(dwc, &child->lli);
352
353 /* Pretend the descriptor completed successfully */
354 dwc_descriptor_complete(dwc, bad_desc);
355}
356
357static void dw_dma_tasklet(unsigned long data)
358{
359 struct dw_dma *dw = (struct dw_dma *)data;
360 struct dw_dma_chan *dwc;
361 u32 status_block;
362 u32 status_xfer;
363 u32 status_err;
364 int i;
365
366 status_block = dma_readl(dw, RAW.BLOCK);
367 status_xfer = dma_readl(dw, RAW.XFER);
368 status_err = dma_readl(dw, RAW.ERROR);
369
370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
371 status_block, status_err);
372
373 for (i = 0; i < dw->dma.chancnt; i++) {
374 dwc = &dw->chan[i];
375 spin_lock(&dwc->lock);
376 if (status_err & (1 << i))
377 dwc_handle_error(dw, dwc);
378 else if ((status_block | status_xfer) & (1 << i))
379 dwc_scan_descriptors(dw, dwc);
380 spin_unlock(&dwc->lock);
381 }
382
383 /*
384 * Re-enable interrupts. Block Complete interrupts are only
385 * enabled if the INT_EN bit in the descriptor is set. This
386 * will trigger a scan before the whole list is done.
387 */
388 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
389 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
390 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
391}
392
393static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
394{
395 struct dw_dma *dw = dev_id;
396 u32 status;
397
398 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
399 dma_readl(dw, STATUS_INT));
400
401 /*
402 * Just disable the interrupts. We'll turn them back on in the
403 * softirq handler.
404 */
405 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
406 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
407 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
408
409 status = dma_readl(dw, STATUS_INT);
410 if (status) {
411 dev_err(dw->dma.dev,
412 "BUG: Unexpected interrupts pending: 0x%x\n",
413 status);
414
415 /* Try to recover */
416 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
417 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
418 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
419 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
420 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
421 }
422
423 tasklet_schedule(&dw->tasklet);
424
425 return IRQ_HANDLED;
426}
427
428/*----------------------------------------------------------------------*/
429
430static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
431{
432 struct dw_desc *desc = txd_to_dw_desc(tx);
433 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
434 dma_cookie_t cookie;
435
436 spin_lock_bh(&dwc->lock);
437 cookie = dwc_assign_cookie(dwc, desc);
438
439 /*
440 * REVISIT: We should attempt to chain as many descriptors as
441 * possible, perhaps even appending to those already submitted
442 * for DMA. But this is hard to do in a race-free manner.
443 */
444 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
446 desc->txd.cookie);
447 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
451 desc->txd.cookie);
452
453 list_add_tail(&desc->desc_node, &dwc->queue);
454 }
455
456 spin_unlock_bh(&dwc->lock);
457
458 return cookie;
459}
460
461static struct dma_async_tx_descriptor *
462dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
463 size_t len, unsigned long flags)
464{
465 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
466 struct dw_desc *desc;
467 struct dw_desc *first;
468 struct dw_desc *prev;
469 size_t xfer_count;
470 size_t offset;
471 unsigned int src_width;
472 unsigned int dst_width;
473 u32 ctllo;
474
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags);
477
478 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
480 return NULL;
481 }
482
483 /*
484 * We can be a lot more clever here, but this should take care
485 * of the most common optimization.
486 */
487 if (!((src | dest | len) & 3))
488 src_width = dst_width = 2;
489 else if (!((src | dest | len) & 1))
490 src_width = dst_width = 1;
491 else
492 src_width = dst_width = 0;
493
494 ctllo = DWC_DEFAULT_CTLLO
495 | DWC_CTLL_DST_WIDTH(dst_width)
496 | DWC_CTLL_SRC_WIDTH(src_width)
497 | DWC_CTLL_DST_INC
498 | DWC_CTLL_SRC_INC
499 | DWC_CTLL_FC_M2M;
500 prev = first = NULL;
501
502 for (offset = 0; offset < len; offset += xfer_count << src_width) {
503 xfer_count = min_t(size_t, (len - offset) >> src_width,
504 DWC_MAX_COUNT);
505
506 desc = dwc_desc_get(dwc);
507 if (!desc)
508 goto err_desc_get;
509
510 desc->lli.sar = src + offset;
511 desc->lli.dar = dest + offset;
512 desc->lli.ctllo = ctllo;
513 desc->lli.ctlhi = xfer_count;
514
515 if (!first) {
516 first = desc;
517 } else {
518 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent,
520 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node,
523 &first->txd.tx_list);
524 }
525 prev = desc;
526 }
527
528
529 if (flags & DMA_PREP_INTERRUPT)
530 /* Trigger interrupt after last block */
531 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532
533 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent,
535 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE);
537
538 first->txd.flags = flags;
539 first->len = len;
540
541 return &first->txd;
542
543err_desc_get:
544 dwc_desc_put(dwc, first);
545 return NULL;
546}
547
548static struct dma_async_tx_descriptor *
549dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
550 unsigned int sg_len, enum dma_data_direction direction,
551 unsigned long flags)
552{
553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
554 struct dw_dma_slave *dws = dwc->dws;
555 struct dw_desc *prev;
556 struct dw_desc *first;
557 u32 ctllo;
558 dma_addr_t reg;
559 unsigned int reg_width;
560 unsigned int mem_width;
561 unsigned int i;
562 struct scatterlist *sg;
563 size_t total_len = 0;
564
565 dev_vdbg(&chan->dev, "prep_dma_slave\n");
566
567 if (unlikely(!dws || !sg_len))
568 return NULL;
569
570 reg_width = dws->slave.reg_width;
571 prev = first = NULL;
572
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
574
575 switch (direction) {
576 case DMA_TO_DEVICE:
577 ctllo = (DWC_DEFAULT_CTLLO
578 | DWC_CTLL_DST_WIDTH(reg_width)
579 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc;
585 u32 len;
586 u32 mem;
587
588 desc = dwc_desc_get(dwc);
589 if (!desc) {
590 dev_err(&chan->dev,
591 "not enough descriptors available\n");
592 goto err_desc_get;
593 }
594
595 mem = sg_phys(sg);
596 len = sg_dma_len(sg);
597 mem_width = 2;
598 if (unlikely(mem & 3 || len & 3))
599 mem_width = 0;
600
601 desc->lli.sar = mem;
602 desc->lli.dar = reg;
603 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
604 desc->lli.ctlhi = len >> mem_width;
605
606 if (!first) {
607 first = desc;
608 } else {
609 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent,
611 prev->txd.phys,
612 sizeof(prev->lli),
613 DMA_TO_DEVICE);
614 list_add_tail(&desc->desc_node,
615 &first->txd.tx_list);
616 }
617 prev = desc;
618 total_len += len;
619 }
620 break;
621 case DMA_FROM_DEVICE:
622 ctllo = (DWC_DEFAULT_CTLLO
623 | DWC_CTLL_SRC_WIDTH(reg_width)
624 | DWC_CTLL_DST_INC
625 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M);
627
628 reg = dws->slave.rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc;
631 u32 len;
632 u32 mem;
633
634 desc = dwc_desc_get(dwc);
635 if (!desc) {
636 dev_err(&chan->dev,
637 "not enough descriptors available\n");
638 goto err_desc_get;
639 }
640
641 mem = sg_phys(sg);
642 len = sg_dma_len(sg);
643 mem_width = 2;
644 if (unlikely(mem & 3 || len & 3))
645 mem_width = 0;
646
647 desc->lli.sar = reg;
648 desc->lli.dar = mem;
649 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
650 desc->lli.ctlhi = len >> reg_width;
651
652 if (!first) {
653 first = desc;
654 } else {
655 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent,
657 prev->txd.phys,
658 sizeof(prev->lli),
659 DMA_TO_DEVICE);
660 list_add_tail(&desc->desc_node,
661 &first->txd.tx_list);
662 }
663 prev = desc;
664 total_len += len;
665 }
666 break;
667 default:
668 return NULL;
669 }
670
671 if (flags & DMA_PREP_INTERRUPT)
672 /* Trigger interrupt after last block */
673 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674
675 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent,
677 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE);
679
680 first->len = total_len;
681
682 return &first->txd;
683
684err_desc_get:
685 dwc_desc_put(dwc, first);
686 return NULL;
687}
688
689static void dwc_terminate_all(struct dma_chan *chan)
690{
691 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
692 struct dw_dma *dw = to_dw_dma(chan->device);
693 struct dw_desc *desc, *_desc;
694 LIST_HEAD(list);
695
696 /*
697 * This is only called when something went wrong elsewhere, so
698 * we don't really care about the data. Just disable the
699 * channel. We still have to poll the channel enable bit due
700 * to AHB/HSB limitations.
701 */
702 spin_lock_bh(&dwc->lock);
703
704 channel_clear_bit(dw, CH_EN, dwc->mask);
705
706 while (dma_readl(dw, CH_EN) & dwc->mask)
707 cpu_relax();
708
709 /* active_list entries will end up before queued entries */
710 list_splice_init(&dwc->queue, &list);
711 list_splice_init(&dwc->active_list, &list);
712
713 spin_unlock_bh(&dwc->lock);
714
715 /* Flush all pending and queued descriptors */
716 list_for_each_entry_safe(desc, _desc, &list, desc_node)
717 dwc_descriptor_complete(dwc, desc);
718}
719
720static enum dma_status
721dwc_is_tx_complete(struct dma_chan *chan,
722 dma_cookie_t cookie,
723 dma_cookie_t *done, dma_cookie_t *used)
724{
725 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
726 dma_cookie_t last_used;
727 dma_cookie_t last_complete;
728 int ret;
729
730 last_complete = dwc->completed;
731 last_used = chan->cookie;
732
733 ret = dma_async_is_complete(cookie, last_complete, last_used);
734 if (ret != DMA_SUCCESS) {
735 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
736
737 last_complete = dwc->completed;
738 last_used = chan->cookie;
739
740 ret = dma_async_is_complete(cookie, last_complete, last_used);
741 }
742
743 if (done)
744 *done = last_complete;
745 if (used)
746 *used = last_used;
747
748 return ret;
749}
750
751static void dwc_issue_pending(struct dma_chan *chan)
752{
753 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
754
755 spin_lock_bh(&dwc->lock);
756 if (!list_empty(&dwc->queue))
757 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
758 spin_unlock_bh(&dwc->lock);
759}
760
761static int dwc_alloc_chan_resources(struct dma_chan *chan,
762 struct dma_client *client)
763{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws;
769 int i;
770 u32 cfghi;
771 u32 cfglo;
772
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780
781 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n");
784 return -EIO;
785 }
786
787 dwc->completed = chan->cookie = 1;
788
789 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0;
791
792 slave = client->slave;
793 if (slave) {
794 /*
795 * We need controller-specific data to set up slave
796 * transfers.
797 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 }
808
809 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi);
811
812 /*
813 * NOTE: some controllers may have additional features that we
814 * need to initialize here, like "scatter-gather" (which
815 * doesn't mean what you think it means), and status writeback.
816 */
817
818 spin_lock_bh(&dwc->lock);
819 i = dwc->descs_allocated;
820 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
821 spin_unlock_bh(&dwc->lock);
822
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) {
825 dev_info(&chan->dev,
826 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock);
828 break;
829 }
830
831 dma_async_tx_descriptor_init(&desc->txd, chan);
832 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc);
838
839 spin_lock_bh(&dwc->lock);
840 i = ++dwc->descs_allocated;
841 }
842
843 /* Enable interrupts */
844 channel_set_bit(dw, MASK.XFER, dwc->mask);
845 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
846 channel_set_bit(dw, MASK.ERROR, dwc->mask);
847
848 spin_unlock_bh(&dwc->lock);
849
850 dev_dbg(&chan->dev,
851 "alloc_chan_resources allocated %d descriptors\n", i);
852
853 return i;
854}
855
856static void dwc_free_chan_resources(struct dma_chan *chan)
857{
858 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
859 struct dw_dma *dw = to_dw_dma(chan->device);
860 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list);
862
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated);
865
866 /* ASSERT: channel is idle */
867 BUG_ON(!list_empty(&dwc->active_list));
868 BUG_ON(!list_empty(&dwc->queue));
869 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
870
871 spin_lock_bh(&dwc->lock);
872 list_splice_init(&dwc->free_list, &list);
873 dwc->descs_allocated = 0;
874 dwc->dws = NULL;
875
876 /* Disable interrupts */
877 channel_clear_bit(dw, MASK.XFER, dwc->mask);
878 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
879 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
880
881 spin_unlock_bh(&dwc->lock);
882
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc);
888 }
889
890 dev_vdbg(&chan->dev, "free_chan_resources done\n");
891}
892
893/*----------------------------------------------------------------------*/
894
895static void dw_dma_off(struct dw_dma *dw)
896{
897 dma_writel(dw, CFG, 0);
898
899 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
900 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
901 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
902 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
903 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
904
905 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
906 cpu_relax();
907}
908
909static int __init dw_probe(struct platform_device *pdev)
910{
911 struct dw_dma_platform_data *pdata;
912 struct resource *io;
913 struct dw_dma *dw;
914 size_t size;
915 int irq;
916 int err;
917 int i;
918
919 pdata = pdev->dev.platform_data;
920 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
921 return -EINVAL;
922
923 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924 if (!io)
925 return -EINVAL;
926
927 irq = platform_get_irq(pdev, 0);
928 if (irq < 0)
929 return irq;
930
931 size = sizeof(struct dw_dma);
932 size += pdata->nr_channels * sizeof(struct dw_dma_chan);
933 dw = kzalloc(size, GFP_KERNEL);
934 if (!dw)
935 return -ENOMEM;
936
937 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
938 err = -EBUSY;
939 goto err_kfree;
940 }
941
942 memset(dw, 0, sizeof *dw);
943
944 dw->regs = ioremap(io->start, DW_REGLEN);
945 if (!dw->regs) {
946 err = -ENOMEM;
947 goto err_release_r;
948 }
949
950 dw->clk = clk_get(&pdev->dev, "hclk");
951 if (IS_ERR(dw->clk)) {
952 err = PTR_ERR(dw->clk);
953 goto err_clk;
954 }
955 clk_enable(dw->clk);
956
957 /* force dma off, just in case */
958 dw_dma_off(dw);
959
960 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
961 if (err)
962 goto err_irq;
963
964 platform_set_drvdata(pdev, dw);
965
966 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
967
968 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
969
970 INIT_LIST_HEAD(&dw->dma.channels);
971 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
972 struct dw_dma_chan *dwc = &dw->chan[i];
973
974 dwc->chan.device = &dw->dma;
975 dwc->chan.cookie = dwc->completed = 1;
976 dwc->chan.chan_id = i;
977 list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
978
979 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
980 spin_lock_init(&dwc->lock);
981 dwc->mask = 1 << i;
982
983 INIT_LIST_HEAD(&dwc->active_list);
984 INIT_LIST_HEAD(&dwc->queue);
985 INIT_LIST_HEAD(&dwc->free_list);
986
987 channel_clear_bit(dw, CH_EN, dwc->mask);
988 }
989
990 /* Clear/disable all interrupts on all channels. */
991 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
992 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
993 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
994 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
995 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
996
997 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
998 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
999 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1000 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1001 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1002
1003 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1004 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1005 dw->dma.dev = &pdev->dev;
1006 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1007 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1008
1009 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1010
1011 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1012 dw->dma.device_terminate_all = dwc_terminate_all;
1013
1014 dw->dma.device_is_tx_complete = dwc_is_tx_complete;
1015 dw->dma.device_issue_pending = dwc_issue_pending;
1016
1017 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1018
1019 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1020 pdev->dev.bus_id, dw->dma.chancnt);
1021
1022 dma_async_device_register(&dw->dma);
1023
1024 return 0;
1025
1026err_irq:
1027 clk_disable(dw->clk);
1028 clk_put(dw->clk);
1029err_clk:
1030 iounmap(dw->regs);
1031 dw->regs = NULL;
1032err_release_r:
1033 release_resource(io);
1034err_kfree:
1035 kfree(dw);
1036 return err;
1037}
1038
1039static int __exit dw_remove(struct platform_device *pdev)
1040{
1041 struct dw_dma *dw = platform_get_drvdata(pdev);
1042 struct dw_dma_chan *dwc, *_dwc;
1043 struct resource *io;
1044
1045 dw_dma_off(dw);
1046 dma_async_device_unregister(&dw->dma);
1047
1048 free_irq(platform_get_irq(pdev, 0), dw);
1049 tasklet_kill(&dw->tasklet);
1050
1051 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1052 chan.device_node) {
1053 list_del(&dwc->chan.device_node);
1054 channel_clear_bit(dw, CH_EN, dwc->mask);
1055 }
1056
1057 clk_disable(dw->clk);
1058 clk_put(dw->clk);
1059
1060 iounmap(dw->regs);
1061 dw->regs = NULL;
1062
1063 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 release_mem_region(io->start, DW_REGLEN);
1065
1066 kfree(dw);
1067
1068 return 0;
1069}
1070
1071static void dw_shutdown(struct platform_device *pdev)
1072{
1073 struct dw_dma *dw = platform_get_drvdata(pdev);
1074
1075 dw_dma_off(platform_get_drvdata(pdev));
1076 clk_disable(dw->clk);
1077}
1078
1079static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1080{
1081 struct dw_dma *dw = platform_get_drvdata(pdev);
1082
1083 dw_dma_off(platform_get_drvdata(pdev));
1084 clk_disable(dw->clk);
1085 return 0;
1086}
1087
1088static int dw_resume_early(struct platform_device *pdev)
1089{
1090 struct dw_dma *dw = platform_get_drvdata(pdev);
1091
1092 clk_enable(dw->clk);
1093 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1094 return 0;
1095
1096}
1097
1098static struct platform_driver dw_driver = {
1099 .remove = __exit_p(dw_remove),
1100 .shutdown = dw_shutdown,
1101 .suspend_late = dw_suspend_late,
1102 .resume_early = dw_resume_early,
1103 .driver = {
1104 .name = "dw_dmac",
1105 },
1106};
1107
1108static int __init dw_init(void)
1109{
1110 return platform_driver_probe(&dw_driver, dw_probe);
1111}
1112module_init(dw_init);
1113
1114static void __exit dw_exit(void)
1115{
1116 platform_driver_unregister(&dw_driver);
1117}
1118module_exit(dw_exit);
1119
1120MODULE_LICENSE("GPL v2");
1121MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1122MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644
index 000000000000..00fdd187bb0c
--- /dev/null
+++ b/drivers/dma/dw_dmac_regs.h
@@ -0,0 +1,225 @@
1/*
2 * Driver for the Synopsys DesignWare AHB DMA Controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/dw_dmac.h>
12
13#define DW_DMA_MAX_NR_CHANNELS 8
14
15/*
16 * Redefine this macro to handle differences between 32- and 64-bit
17 * addressing, big vs. little endian, etc.
18 */
19#define DW_REG(name) u32 name; u32 __pad_##name
20
21/* Hardware register definitions. */
22struct dw_dma_chan_regs {
23 DW_REG(SAR); /* Source Address Register */
24 DW_REG(DAR); /* Destination Address Register */
25 DW_REG(LLP); /* Linked List Pointer */
26 u32 CTL_LO; /* Control Register Low */
27 u32 CTL_HI; /* Control Register High */
28 DW_REG(SSTAT);
29 DW_REG(DSTAT);
30 DW_REG(SSTATAR);
31 DW_REG(DSTATAR);
32 u32 CFG_LO; /* Configuration Register Low */
33 u32 CFG_HI; /* Configuration Register High */
34 DW_REG(SGR);
35 DW_REG(DSR);
36};
37
38struct dw_dma_irq_regs {
39 DW_REG(XFER);
40 DW_REG(BLOCK);
41 DW_REG(SRC_TRAN);
42 DW_REG(DST_TRAN);
43 DW_REG(ERROR);
44};
45
46struct dw_dma_regs {
47 /* per-channel registers */
48 struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
49
50 /* irq handling */
51 struct dw_dma_irq_regs RAW; /* r */
52 struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
53 struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
54 struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
55
56 DW_REG(STATUS_INT); /* r */
57
58 /* software handshaking */
59 DW_REG(REQ_SRC);
60 DW_REG(REQ_DST);
61 DW_REG(SGL_REQ_SRC);
62 DW_REG(SGL_REQ_DST);
63 DW_REG(LAST_SRC);
64 DW_REG(LAST_DST);
65
66 /* miscellaneous */
67 DW_REG(CFG);
68 DW_REG(CH_EN);
69 DW_REG(ID);
70 DW_REG(TEST);
71
72 /* optional encoded params, 0x3c8..0x3 */
73};
74
75/* Bitfields in CTL_LO */
76#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
77#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
78#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
79#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
80#define DWC_CTLL_DST_DEC (1<<7)
81#define DWC_CTLL_DST_FIX (2<<7)
82#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
83#define DWC_CTLL_SRC_DEC (1<<9)
84#define DWC_CTLL_SRC_FIX (2<<9)
85#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
92#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
93/* plus 4 transfer types for peripheral-as-flow-controller */
94#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
95#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
96#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
97#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
98
99/* Bitfields in CTL_HI */
100#define DWC_CTLH_DONE 0x00001000
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
107#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
108#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
109#define DWC_CFGL_RELOAD_SAR (1 << 30)
110#define DWC_CFGL_RELOAD_DAR (1 << 31)
111
112/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
113#define DWC_CFGH_DS_UPD_EN (1 << 5)
114#define DWC_CFGH_SS_UPD_EN (1 << 6)
115
116/* Bitfields in SGR */
117#define DWC_SGR_SGI(x) ((x) << 0)
118#define DWC_SGR_SGC(x) ((x) << 20)
119
120/* Bitfields in DSR */
121#define DWC_DSR_DSI(x) ((x) << 0)
122#define DWC_DSR_DSC(x) ((x) << 20)
123
124/* Bitfields in CFG */
125#define DW_CFG_DMA_EN (1 << 0)
126
127#define DW_REGLEN 0x400
128
129struct dw_dma_chan {
130 struct dma_chan chan;
131 void __iomem *ch_regs;
132 u8 mask;
133
134 spinlock_t lock;
135
136 /* these other elements are all protected by lock */
137 dma_cookie_t completed;
138 struct list_head active_list;
139 struct list_head queue;
140 struct list_head free_list;
141
142 struct dw_dma_slave *dws;
143
144 unsigned int descs_allocated;
145};
146
147static inline struct dw_dma_chan_regs __iomem *
148__dwc_regs(struct dw_dma_chan *dwc)
149{
150 return dwc->ch_regs;
151}
152
153#define channel_readl(dwc, name) \
154 __raw_readl(&(__dwc_regs(dwc)->name))
155#define channel_writel(dwc, name, val) \
156 __raw_writel((val), &(__dwc_regs(dwc)->name))
157
158static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
159{
160 return container_of(chan, struct dw_dma_chan, chan);
161}
162
163
164struct dw_dma {
165 struct dma_device dma;
166 void __iomem *regs;
167 struct tasklet_struct tasklet;
168 struct clk *clk;
169
170 u8 all_chan_mask;
171
172 struct dw_dma_chan chan[0];
173};
174
175static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
176{
177 return dw->regs;
178}
179
180#define dma_readl(dw, name) \
181 __raw_readl(&(__dw_regs(dw)->name))
182#define dma_writel(dw, name, val) \
183 __raw_writel((val), &(__dw_regs(dw)->name))
184
185#define channel_set_bit(dw, reg, mask) \
186 dma_writel(dw, reg, ((mask) << 8) | (mask))
187#define channel_clear_bit(dw, reg, mask) \
188 dma_writel(dw, reg, ((mask) << 8) | 0)
189
190static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
191{
192 return container_of(ddev, struct dw_dma, dma);
193}
194
195/* LLI == Linked List Item; a.k.a. DMA block descriptor */
196struct dw_lli {
197 /* values that are not changed by hardware */
198 dma_addr_t sar;
199 dma_addr_t dar;
200 dma_addr_t llp; /* chain to next lli */
201 u32 ctllo;
202 /* values that may get written back: */
203 u32 ctlhi;
204 /* sstat and dstat can snapshot peripheral register state.
205 * silicon config may discard either or both...
206 */
207 u32 sstat;
208 u32 dstat;
209};
210
211struct dw_desc {
212 /* FIRST values the hardware uses */
213 struct dw_lli lli;
214
215 /* THEN values for driver housekeeping */
216 struct list_head desc_node;
217 struct dma_async_tx_descriptor txd;
218 size_t len;
219};
220
221static inline struct dw_desc *
222txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
223{
224 return container_of(txd, struct dw_desc, txd);
225}
diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h
index 31e48b0e7324..d18a3053be0d 100644
--- a/include/asm-avr32/arch-at32ap/at32ap700x.h
+++ b/include/asm-avr32/arch-at32ap/at32ap700x.h
@@ -30,4 +30,20 @@
 #define GPIO_PIN_PD(N)	(GPIO_PIOD_BASE + (N))
 #define GPIO_PIN_PE(N)	(GPIO_PIOE_BASE + (N))
 
+
+/*
+ * DMAC peripheral hardware handshaking interfaces, used with dw_dmac
+ */
+#define DMAC_MCI_RX		0
+#define DMAC_MCI_TX		1
+#define DMAC_DAC_TX		2
+#define DMAC_AC97_A_RX		3
+#define DMAC_AC97_A_TX		4
+#define DMAC_AC97_B_RX		5
+#define DMAC_AC97_B_TX		6
+#define DMAC_DMAREQ_0		7
+#define DMAC_DMAREQ_1		8
+#define DMAC_DMAREQ_2		9
+#define DMAC_DMAREQ_3		10
+
 #endif /* __ASM_ARCH_AT32AP700X_H__ */
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
new file mode 100644
index 000000000000..04d217b442bf
--- /dev/null
+++ b/include/linux/dw_dmac.h
@@ -0,0 +1,62 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems.)
4 *
5 * Copyright (C) 2007 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef DW_DMAC_H
12#define DW_DMAC_H
13
14#include <linux/dmaengine.h>
15
16/**
17 * struct dw_dma_platform_data - Controller configuration parameters
18 * @nr_channels: Number of channels supported by hardware (max 8)
19 */
20struct dw_dma_platform_data {
21 unsigned int nr_channels;
22};
23
24/**
25 * struct dw_dma_slave - Controller-specific information about a slave
26 * @slave: Generic information about the slave
27 * @ctl_lo: Platform-specific initializer for the CTL_LO register
28 * @cfg_hi: Platform-specific initializer for the CFG_HI register
29 * @cfg_lo: Platform-specific initializer for the CFG_LO register
30 */
31struct dw_dma_slave {
32 struct dma_slave slave;
33 u32 cfg_hi;
34 u32 cfg_lo;
35};
36
37/* Platform-configurable bits in CFG_HI */
38#define DWC_CFGH_FCMODE (1 << 0)
39#define DWC_CFGH_FIFO_MODE (1 << 1)
40#define DWC_CFGH_PROTCTL(x) ((x) << 2)
41#define DWC_CFGH_SRC_PER(x) ((x) << 7)
42#define DWC_CFGH_DST_PER(x) ((x) << 11)
43
44/* Platform-configurable bits in CFG_LO */
45#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */
46#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
47#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
48#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
49#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
50#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
51#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
52#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
53#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
54#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
55#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
56
57static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
58{
59 return container_of(slave, struct dw_dma_slave, slave);
60}
61
62#endif /* DW_DMAC_H */