author	Sascha Hauer <s.hauer@pengutronix.de>	2010-09-30 09:56:34 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-10-05 18:49:26 -0400
commit	1ec1e82f2510e2bdcb6268ed74aa79e1a7bc9594 (patch)
tree	f274f0b9ff704416492fe420174e65b9b640eff2 /drivers/dma
parent	6e3ecaf0ad49de0bed829d409a164e7107c02993 (diff)
dmaengine: Add Freescale i.MX SDMA support
This patch adds support for the Freescale i.MX SDMA engine. The SDMA engine is a scatter/gather DMA engine implemented as a separate coprocessor. SDMA needs its own firmware, which is requested using the standard request_firmware mechanism. The firmware has different entry points for each peripheral type, so drivers have to pass the peripheral type to the DMA engine, which in turn picks the correct firmware entry point from a table contained in the firmware image itself.

The original Freescale code also supports transferring data to the internal SRAM, which needs different firmware entry points. Support for this is currently not implemented. Also, support for the ASRC (asynchronous sample rate converter) is skipped.

I took a very simple approach to implement dmaengine support: only a single descriptor is statically assigned to each channel. This means that transfers can't be queued up; only a single transfer is in progress at a time. This simplifies the implementation a lot and is sufficient for the usual device/memory transfers.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Reviewed-by: Linus Walleij <linus.ml.walleij@gmail.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
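As a usage illustration (not part of this patch): a client driver of this era would pass the peripheral type and request line to the SDMA driver through chan->private via a dmaengine filter function, which sdma_alloc_chan_resources() in the diff below then reads. The following is a hedged sketch only; the filter and function names, the SSI peripheral type and the request line number 28 are assumptions for illustration.

#include <linux/dmaengine.h>
#include <mach/dma.h>

/* Illustrative filter: hand our imx_dma_data to the channel so the
 * driver can pick it up from chan->private in alloc_chan_resources. */
static bool example_sdma_filter(struct dma_chan *chan, void *param)
{
	chan->private = param;
	return true;
}

static struct dma_chan *example_request_sdma_channel(void)
{
	/* Assumed values: SSI peripheral fed by DMA request line 28 */
	static struct imx_dma_data data = {
		.peripheral_type = IMX_DMATYPE_SSI,
		.dma_request = 28,
		.priority = DMA_PRIO_MEDIUM,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_sdma_filter, &data);
}

The returned channel would then get a DMA_SLAVE_CONFIG call through device_control() (see sdma_control() in the diff) to set the peripheral address, watermark level and word size before preparing slave or cyclic descriptors.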
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig    |    8 +
-rw-r--r--	drivers/dma/Makefile   |    1 +
-rw-r--r--	drivers/dma/imx-sdma.c | 1392 ++++++++++++++++++++++++++++++++++++
3 files changed, 1401 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..3cf1d123f2d3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -195,6 +195,14 @@ config PCH_DMA
 	help
 	  Enable support for the Topcliff PCH DMA engine.
 
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51 chips.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..3ed7babd3a99 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
new file mode 100644
index 000000000000..0834323a0599
--- /dev/null
+++ b/drivers/dma/imx-sdma.c
@@ -0,0 +1,1392 @@
+/*
+ * drivers/dma/imx-sdma.c
+ *
+ * This file contains a driver for the Freescale Smart DMA engine
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * Based on code from Freescale:
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/sdma.h>
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+/* SDMA registers */
+#define SDMA_H_C0PTR		0x000
+#define SDMA_H_INTR		0x004
+#define SDMA_H_STATSTOP		0x008
+#define SDMA_H_START		0x00c
+#define SDMA_H_EVTOVR		0x010
+#define SDMA_H_DSPOVR		0x014
+#define SDMA_H_HOSTOVR		0x018
+#define SDMA_H_EVTPEND		0x01c
+#define SDMA_H_DSPENBL		0x020
+#define SDMA_H_RESET		0x024
+#define SDMA_H_EVTERR		0x028
+#define SDMA_H_INTRMSK		0x02c
+#define SDMA_H_PSW		0x030
+#define SDMA_H_EVTERRDBG	0x034
+#define SDMA_H_CONFIG		0x038
+#define SDMA_ONCE_ENB		0x040
+#define SDMA_ONCE_DATA		0x044
+#define SDMA_ONCE_INSTR		0x048
+#define SDMA_ONCE_STAT		0x04c
+#define SDMA_ONCE_CMD		0x050
+#define SDMA_EVT_MIRROR		0x054
+#define SDMA_ILLINSTADDR	0x058
+#define SDMA_CHN0ADDR		0x05c
+#define SDMA_ONCE_RTB		0x060
+#define SDMA_XTRIG_CONF1	0x070
+#define SDMA_XTRIG_CONF2	0x074
+#define SDMA_CHNENBL0_V2	0x200
+#define SDMA_CHNENBL0_V1	0x080
+#define SDMA_CHNPRI_0		0x100
+
+/*
+ * Buffer descriptor status values.
+ */
+#define BD_DONE  0x01
+#define BD_WRAP  0x02
+#define BD_CONT  0x04
+#define BD_INTR  0x08
+#define BD_RROR  0x10
+#define BD_LAST  0x20
+#define BD_EXTD  0x80
+
+/*
+ * Data Node descriptor status values.
+ */
+#define DND_END_OF_FRAME  0x80
+#define DND_END_OF_XFER   0x40
+#define DND_DONE          0x20
+#define DND_UNUSED        0x01
+
+/*
+ * IPCV2 descriptor status values.
+ */
+#define BD_IPCV2_END_OF_FRAME  0x40
+
+#define IPCV2_MAX_NODES        50
+/*
+ * Error bit set in the CCB status field by the SDMA,
+ * in setbd routine, in case of a transfer error
+ */
+#define DATA_ERROR  0x10000000
+
+/*
+ * Buffer descriptor commands.
+ */
+#define C0_ADDR             0x01
+#define C0_LOAD             0x02
+#define C0_DUMP             0x03
+#define C0_SETCTX           0x07
+#define C0_GETCTX           0x03
+#define C0_SETDM            0x01
+#define C0_SETPM            0x04
+#define C0_GETDM            0x02
+#define C0_GETPM            0x08
+/*
+ * Change endianness indicator in the BD command field
+ */
+#define CHANGE_ENDIANNESS   0x80
+
+/*
+ * Mode/Count of data node descriptors - IPCv2
+ */
+struct sdma_mode_count {
+	u32 count   : 16; /* size of the buffer pointed by this BD */
+	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
+	u32 command :  8; /* command mostly used for channel 0 */
+};
+
+/*
+ * Buffer descriptor
+ */
+struct sdma_buffer_descriptor {
+	struct sdma_mode_count  mode;
+	u32 buffer_addr;	/* address of the buffer described */
+	u32 ext_buffer_addr;	/* extended buffer address */
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_channel_control - Channel control Block
+ *
+ * @current_bd_ptr	current buffer descriptor processed
+ * @base_bd_ptr		first element of buffer descriptor array
+ * @unused		padding. The SDMA engine expects an array of 128 byte
+ *			control blocks
+ */
+struct sdma_channel_control {
+	u32 current_bd_ptr;
+	u32 base_bd_ptr;
+	u32 unused[2];
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_state_registers - SDMA context for a channel
+ *
+ * @pc:		program counter
+ * @t:		test bit: status of arithmetic & test instruction
+ * @rpc:	return program counter
+ * @sf:		source fault while loading data
+ * @spc:	loop start program counter
+ * @df:		destination fault while storing data
+ * @epc:	loop end program counter
+ * @lm:		loop mode
+ */
+struct sdma_state_registers {
+	u32 pc     :14;
+	u32 unused1: 1;
+	u32 t      : 1;
+	u32 rpc    :14;
+	u32 unused0: 1;
+	u32 sf     : 1;
+	u32 spc    :14;
+	u32 unused2: 1;
+	u32 df     : 1;
+	u32 epc    :14;
+	u32 lm     : 2;
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_context_data - sdma context specific to a channel
+ *
+ * @channel_state:	channel state bits
+ * @gReg:		general registers
+ * @mda:		burst dma destination address register
+ * @msa:		burst dma source address register
+ * @ms:			burst dma status register
+ * @md:			burst dma data register
+ * @pda:		peripheral dma destination address register
+ * @psa:		peripheral dma source address register
+ * @ps:			peripheral dma status register
+ * @pd:			peripheral dma data register
+ * @ca:			CRC polynomial register
+ * @cs:			CRC accumulator register
+ * @dda:		dedicated core destination address register
+ * @dsa:		dedicated core source address register
+ * @ds:			dedicated core status register
+ * @dd:			dedicated core data register
+ */
+struct sdma_context_data {
+	struct sdma_state_registers  channel_state;
+	u32  gReg[8];
+	u32  mda;
+	u32  msa;
+	u32  ms;
+	u32  md;
+	u32  pda;
+	u32  psa;
+	u32  ps;
+	u32  pd;
+	u32  ca;
+	u32  cs;
+	u32  dda;
+	u32  dsa;
+	u32  ds;
+	u32  dd;
+	u32  scratch0;
+	u32  scratch1;
+	u32  scratch2;
+	u32  scratch3;
+	u32  scratch4;
+	u32  scratch5;
+	u32  scratch6;
+	u32  scratch7;
+} __attribute__ ((packed));
+
+#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+
+struct sdma_engine;
+
+/**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+ * @sdma		pointer to the SDMA engine for this channel
+ * @channel		the channel number, matches dmaengine chan_id
+ * @direction		transfer type. Needed for setting SDMA script
+ * @peripheral_type	Peripheral type. Needed for setting SDMA script
+ * @event_id0		aka dma request line
+ * @event_id1		for channels that use 2 events
+ * @word_size		peripheral access size
+ * @buf_tail		ID of the buffer that was processed
+ * @done		channel completion
+ * @num_bd		max NUM_BD. number of descriptors currently handling
+ */
+struct sdma_channel {
+	struct sdma_engine		*sdma;
+	unsigned int			channel;
+	enum dma_data_direction		direction;
+	enum sdma_peripheral_type	peripheral_type;
+	unsigned int			event_id0;
+	unsigned int			event_id1;
+	enum dma_slave_buswidth		word_size;
+	unsigned int			buf_tail;
+	struct completion		done;
+	unsigned int			num_bd;
+	struct sdma_buffer_descriptor	*bd;
+	dma_addr_t			bd_phys;
+	unsigned int			pc_from_device, pc_to_device;
+	unsigned long			flags;
+	dma_addr_t			per_address;
+	u32				event_mask0, event_mask1;
+	u32				watermark_level;
+	u32				shp_addr, per_addr;
+	struct dma_chan			chan;
+	spinlock_t			lock;
+	struct dma_async_tx_descriptor	desc;
+	dma_cookie_t			last_completed;
+	enum dma_status			status;
+};
+
+#define IMX_DMA_SG_LOOP		(1 << 0)
+
+#define MAX_DMA_CHANNELS 32
+#define MXC_SDMA_DEFAULT_PRIORITY 1
+#define MXC_SDMA_MIN_PRIORITY 1
+#define MXC_SDMA_MAX_PRIORITY 7
+
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+	u32 ap_2_ap_addr;
+	u32 ap_2_bp_addr;
+	u32 ap_2_ap_fixed_addr;
+	u32 bp_2_ap_addr;
+	u32 loopback_on_dsp_side_addr;
+	u32 mcu_interrupt_only_addr;
+	u32 firi_2_per_addr;
+	u32 firi_2_mcu_addr;
+	u32 per_2_firi_addr;
+	u32 mcu_2_firi_addr;
+	u32 uart_2_per_addr;
+	u32 uart_2_mcu_addr;
+	u32 per_2_app_addr;
+	u32 mcu_2_app_addr;
+	u32 per_2_per_addr;
+	u32 uartsh_2_per_addr;
+	u32 uartsh_2_mcu_addr;
+	u32 per_2_shp_addr;
+	u32 mcu_2_shp_addr;
+	u32 ata_2_mcu_addr;
+	u32 mcu_2_ata_addr;
+	u32 app_2_per_addr;
+	u32 app_2_mcu_addr;
+	u32 shp_2_per_addr;
+	u32 shp_2_mcu_addr;
+	u32 mshc_2_mcu_addr;
+	u32 mcu_2_mshc_addr;
+	u32 spdif_2_mcu_addr;
+	u32 mcu_2_spdif_addr;
+	u32 asrc_2_mcu_addr;
+	u32 ext_mem_2_ipu_addr;
+	u32 descrambler_addr;
+	u32 dptc_dvfs_addr;
+	u32 utra_addr;
+	u32 ram_code_start_addr;
+};
+
+#define SDMA_FIRMWARE_MAGIC 0x414d4453
+
+/**
+ * struct sdma_firmware_header - Layout of the firmware image
+ *
+ * @magic		"SDMA"
+ * @version_major	increased whenever layout of struct sdma_script_start_addrs
+ *			changes.
+ * @version_minor	firmware minor version (for binary compatible changes)
+ * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs	Number of script addresses in this image
+ * @ram_code_start	offset of SDMA ram image in this firmware image
+ * @ram_code_size	size of SDMA ram image
+ * @script_addrs	Stores the start address of the SDMA scripts
+ *			(in SDMA memory space)
+ */
+struct sdma_firmware_header {
+	u32	magic;
+	u32	version_major;
+	u32	version_minor;
+	u32	script_addrs_start;
+	u32	num_script_addrs;
+	u32	ram_code_start;
+	u32	ram_code_size;
+};
+
+struct sdma_engine {
+	struct device			*dev;
+	struct sdma_channel		channel[MAX_DMA_CHANNELS];
+	struct sdma_channel_control	*channel_control;
+	void __iomem			*regs;
+	unsigned int			version;
+	unsigned int			num_events;
+	struct sdma_context_data	*context;
+	dma_addr_t			context_phys;
+	struct dma_device		dma_device;
+	struct clk			*clk;
+	struct sdma_script_start_addrs	*script_addrs;
+};
+
+#define SDMA_H_CONFIG_DSPDMA	(1 << 12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS	(1 << 11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR	(1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
+
+static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
+{
+	u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
+
+	return chnenbl0 + event * 4;
+}
+
+static int sdma_config_ownership(struct sdma_channel *sdmac,
+		bool event_override, bool mcu_override, bool dsp_override)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	u32 evt, mcu, dsp;
+
+	if (event_override && mcu_override && dsp_override)
+		return -EINVAL;
+
+	evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
+	mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
+	dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+
+	if (dsp_override)
+		dsp &= ~(1 << channel);
+	else
+		dsp |= (1 << channel);
+
+	if (event_override)
+		evt &= ~(1 << channel);
+	else
+		evt |= (1 << channel);
+
+	if (mcu_override)
+		mcu &= ~(1 << channel);
+	else
+		mcu |= (1 << channel);
+
+	__raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
+	__raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
+	__raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+
+	return 0;
+}
+
+/*
+ * sdma_run_channel - run a channel and wait till it's done
+ */
+static int sdma_run_channel(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	int ret;
+
+	init_completion(&sdmac->done);
+
+	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+
+	ret = wait_for_completion_timeout(&sdmac->done, HZ);
+
+	return ret ? 0 : -ETIMEDOUT;
+}
+
+static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
+		u32 address)
+{
+	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	void *buf_virt;
+	dma_addr_t buf_phys;
+	int ret;
+
+	buf_virt = dma_alloc_coherent(NULL,
+			size,
+			&buf_phys, GFP_KERNEL);
+	if (!buf_virt)
+		return -ENOMEM;
+
+	bd0->mode.command = C0_SETPM;
+	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+	bd0->mode.count = size / 2;
+	bd0->buffer_addr = buf_phys;
+	bd0->ext_buffer_addr = address;
+
+	memcpy(buf_virt, buf, size);
+
+	ret = sdma_run_channel(&sdma->channel[0]);
+
+	dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+	return ret;
+}
+
+static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	u32 val;
+	u32 chnenbl = chnenbl_ofs(sdma, event);
+
+	val = __raw_readl(sdma->regs + chnenbl);
+	val |= (1 << channel);
+	__raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	u32 chnenbl = chnenbl_ofs(sdma, event);
+	u32 val;
+
+	val = __raw_readl(sdma->regs + chnenbl);
+	val &= ~(1 << channel);
+	__raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+{
+	struct sdma_buffer_descriptor *bd;
+
+	/*
+	 * loop mode. Iterate over descriptors, re-setup them and
+	 * call callback function.
+	 */
+	while (1) {
+		bd = &sdmac->bd[sdmac->buf_tail];
+
+		if (bd->mode.status & BD_DONE)
+			break;
+
+		if (bd->mode.status & BD_RROR)
+			sdmac->status = DMA_ERROR;
+		else
+			sdmac->status = DMA_SUCCESS;
+
+		bd->mode.status |= BD_DONE;
+		sdmac->buf_tail++;
+		sdmac->buf_tail %= sdmac->num_bd;
+
+		if (sdmac->desc.callback)
+			sdmac->desc.callback(sdmac->desc.callback_param);
+	}
+}
+
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+{
+	struct sdma_buffer_descriptor *bd;
+	int i, error = 0;
+
+	/*
+	 * non loop mode. Iterate over all descriptors, collect
+	 * errors and call callback function
+	 */
+	for (i = 0; i < sdmac->num_bd; i++) {
+		bd = &sdmac->bd[i];
+
+		if (bd->mode.status & (BD_DONE | BD_RROR))
+			error = -EIO;
+	}
+
+	if (error)
+		sdmac->status = DMA_ERROR;
+	else
+		sdmac->status = DMA_SUCCESS;
+
+	if (sdmac->desc.callback)
+		sdmac->desc.callback(sdmac->desc.callback_param);
+	sdmac->last_completed = sdmac->desc.cookie;
+}
+
+static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+{
+	complete(&sdmac->done);
+
+	/* not interested in channel 0 interrupts */
+	if (sdmac->channel == 0)
+		return;
+
+	if (sdmac->flags & IMX_DMA_SG_LOOP)
+		sdma_handle_channel_loop(sdmac);
+	else
+		mxc_sdma_handle_channel_normal(sdmac);
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+	struct sdma_engine *sdma = dev_id;
+	u32 stat;
+
+	stat = __raw_readl(sdma->regs + SDMA_H_INTR);
+	__raw_writel(stat, sdma->regs + SDMA_H_INTR);
+
+	while (stat) {
+		int channel = fls(stat) - 1;
+		struct sdma_channel *sdmac = &sdma->channel[channel];
+
+		mxc_sdma_handle_channel(sdmac);
+
+		stat &= ~(1 << channel);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * sets the pc of SDMA script according to the peripheral type
+ */
+static void sdma_get_pc(struct sdma_channel *sdmac,
+		enum sdma_peripheral_type peripheral_type)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int per_2_emi = 0, emi_2_per = 0;
+	/*
+	 * These are needed once we start to support transfers between
+	 * two peripherals or memory-to-memory transfers
+	 */
+	int per_2_per = 0, emi_2_emi = 0;
+
+	sdmac->pc_from_device = 0;
+	sdmac->pc_to_device = 0;
+
+	switch (peripheral_type) {
+	case IMX_DMATYPE_MEMORY:
+		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
+		break;
+	case IMX_DMATYPE_DSP:
+		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
+		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
+		break;
+	case IMX_DMATYPE_FIRI:
+		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
+		break;
+	case IMX_DMATYPE_UART:
+		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+		break;
+	case IMX_DMATYPE_UART_SP:
+		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+		break;
+	case IMX_DMATYPE_ATA:
+		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
+		break;
+	case IMX_DMATYPE_CSPI:
+	case IMX_DMATYPE_EXT:
+	case IMX_DMATYPE_SSI:
+		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+		break;
+	case IMX_DMATYPE_SSI_SP:
+	case IMX_DMATYPE_MMC:
+	case IMX_DMATYPE_SDHC:
+	case IMX_DMATYPE_CSPI_SP:
+	case IMX_DMATYPE_ESAI:
+	case IMX_DMATYPE_MSHC_SP:
+		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+		break;
+	case IMX_DMATYPE_ASRC:
+		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
+		per_2_per = sdma->script_addrs->per_2_per_addr;
+		break;
+	case IMX_DMATYPE_MSHC:
+		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
+		break;
+	case IMX_DMATYPE_CCM:
+		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
+		break;
+	case IMX_DMATYPE_SPDIF:
+		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
+		break;
+	case IMX_DMATYPE_IPU_MEMORY:
+		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+		break;
+	default:
+		break;
+	}
+
+	sdmac->pc_from_device = per_2_emi;
+	sdmac->pc_to_device = emi_2_per;
+}
+
+static int sdma_load_context(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	int load_address;
+	struct sdma_context_data *context = sdma->context;
+	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	int ret;
+
+	if (sdmac->direction == DMA_FROM_DEVICE) {
+		load_address = sdmac->pc_from_device;
+	} else {
+		load_address = sdmac->pc_to_device;
+	}
+
+	if (load_address < 0)
+		return load_address;
+
+	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
+	dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
+	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
+	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
+	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+
+	memset(context, 0, sizeof(*context));
+	context->channel_state.pc = load_address;
+
+	/* Set up the context: event masks, peripheral base address
+	 * and watermark level
+	 */
+	context->gReg[0] = sdmac->event_mask1;
+	context->gReg[1] = sdmac->event_mask0;
+	context->gReg[2] = sdmac->per_addr;
+	context->gReg[6] = sdmac->shp_addr;
+	context->gReg[7] = sdmac->watermark_level;
+
+	bd0->mode.command = C0_SETDM;
+	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+	bd0->mode.count = sizeof(*context) / 4;
+	bd0->buffer_addr = sdma->context_phys;
+	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+
+	ret = sdma_run_channel(&sdma->channel[0]);
+
+	return ret;
+}
+
+static void sdma_disable_channel(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+
+	__raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+	sdmac->status = DMA_ERROR;
+}
+
+static int sdma_config_channel(struct sdma_channel *sdmac)
+{
+	int ret;
+
+	sdma_disable_channel(sdmac);
+
+	sdmac->event_mask0 = 0;
+	sdmac->event_mask1 = 0;
+	sdmac->shp_addr = 0;
+	sdmac->per_addr = 0;
+
+	if (sdmac->event_id0) {
+		if (sdmac->event_id0 > 32)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id0);
+	}
+
+	switch (sdmac->peripheral_type) {
+	case IMX_DMATYPE_DSP:
+		sdma_config_ownership(sdmac, false, true, true);
+		break;
+	case IMX_DMATYPE_MEMORY:
+		sdma_config_ownership(sdmac, false, true, false);
+		break;
+	default:
+		sdma_config_ownership(sdmac, true, true, false);
+		break;
+	}
+
+	sdma_get_pc(sdmac, sdmac->peripheral_type);
+
+	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
+			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+		/* Handle multiple event channels differently */
+		if (sdmac->event_id1) {
+			sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+			if (sdmac->event_id1 > 31)
+				sdmac->watermark_level |= 1 << 31;
+			sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+			if (sdmac->event_id0 > 31)
+				sdmac->watermark_level |= 1 << 30;
+		} else {
+			sdmac->event_mask0 = 1 << sdmac->event_id0;
+			sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+		}
+		/* Watermark Level */
+		sdmac->watermark_level |= sdmac->watermark_level;
+		/* Address */
+		sdmac->shp_addr = sdmac->per_address;
+	} else {
+		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+	}
+
+	ret = sdma_load_context(sdmac);
+
+	return ret;
+}
+
+static int sdma_set_channel_priority(struct sdma_channel *sdmac,
+		unsigned int priority)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+
+	if (priority < MXC_SDMA_MIN_PRIORITY
+	    || priority > MXC_SDMA_MAX_PRIORITY) {
+		return -EINVAL;
+	}
+
+	__raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+
+	return 0;
+}
+
+static int sdma_request_channel(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	int ret = -EBUSY;
+
+	sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
+	if (!sdmac->bd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memset(sdmac->bd, 0, PAGE_SIZE);
+
+	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
+	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+	clk_enable(sdma->clk);
+
+	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+
+	init_completion(&sdmac->done);
+
+	sdmac->buf_tail = 0;
+
+	return 0;
+out:
+
+	return ret;
+}
+
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+}
+
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+{
+	dma_cookie_t cookie = sdma->chan.cookie;
+
+	if (++cookie < 0)
+		cookie = 1;
+
+	sdma->chan.cookie = cookie;
+	sdma->desc.cookie = cookie;
+
+	return cookie;
+}
+
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sdma_channel, chan);
+}
+
+static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	dma_cookie_t cookie;
+
+	spin_lock_irq(&sdmac->lock);
+
+	cookie = sdma_assign_cookie(sdmac);
+
+	sdma_enable_channel(sdma, tx->chan->chan_id);
+
+	spin_unlock_irq(&sdmac->lock);
+
+	return cookie;
+}
+
+static int sdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct imx_dma_data *data = chan->private;
+	int prio, ret;
+
+	/* No need to execute this for internal channel 0 */
+	if (chan->chan_id == 0)
+		return 0;
+
+	if (!data)
+		return -EINVAL;
+
+	switch (data->priority) {
+	case DMA_PRIO_HIGH:
+		prio = 3;
+		break;
+	case DMA_PRIO_MEDIUM:
+		prio = 2;
+		break;
+	case DMA_PRIO_LOW:
+	default:
+		prio = 1;
+		break;
+	}
+
+	sdmac->peripheral_type = data->peripheral_type;
+	sdmac->event_id0 = data->dma_request;
+	ret = sdma_set_channel_priority(sdmac, prio);
+	if (ret)
+		return ret;
+
+	ret = sdma_request_channel(sdmac);
+	if (ret)
+		return ret;
+
+	dma_async_tx_descriptor_init(&sdmac->desc, chan);
+	sdmac->desc.tx_submit = sdma_tx_submit;
+	/* txd.flags will be overwritten in prep funcs */
+	sdmac->desc.flags = DMA_CTRL_ACK;
+
+	return 0;
+}
+
+static void sdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+
+	sdma_disable_channel(sdmac);
+
+	if (sdmac->event_id0)
+		sdma_event_disable(sdmac, sdmac->event_id0);
+	if (sdmac->event_id1)
+		sdma_event_disable(sdmac, sdmac->event_id1);
+
+	sdmac->event_id0 = 0;
+	sdmac->event_id1 = 0;
+
+	sdma_set_channel_priority(sdmac, 0);
+
+	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+	clk_disable(sdma->clk);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned long flags)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int ret, i, count;
+	int channel = chan->chan_id;
+	struct scatterlist *sg;
+
+	if (sdmac->status == DMA_IN_PROGRESS)
+		return NULL;
+	sdmac->status = DMA_IN_PROGRESS;
+
+	sdmac->flags = 0;
+
+	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
+			sg_len, channel);
+
+	sdmac->direction = direction;
+	ret = sdma_load_context(sdmac);
+	if (ret)
+		goto err_out;
+
+	if (sg_len > NUM_BD) {
+		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+				channel, sg_len, NUM_BD);
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		int param;
+
+		bd->buffer_addr = sg->dma_address;
+
+		count = sg->length;
+
+		if (count > 0xffff) {
+			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
+					channel, count, 0xffff);
+			ret = -EINVAL;
+			goto err_out;
+		}
+
+		bd->mode.count = count;
+
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
+			ret = -EINVAL;
+			goto err_out;
+		}
+		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+			bd->mode.command = 0;
+		else
+			bd->mode.command = sdmac->word_size;
+
+		param = BD_DONE | BD_EXTD | BD_CONT;
+
+		if (sdmac->flags & IMX_DMA_SG_LOOP) {
+			param |= BD_INTR;
+			if (i + 1 == sg_len)
+				param |= BD_WRAP;
+		}
+
+		if (i + 1 == sg_len)
+			param |= BD_INTR;
+
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+				i, count, sg->dma_address,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+	}
+
+	sdmac->num_bd = sg_len;
+	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+	return &sdmac->desc;
+err_out:
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int num_periods = buf_len / period_len;
+	int channel = chan->chan_id;
+	int ret, i = 0, buf = 0;
+
+	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+	if (sdmac->status == DMA_IN_PROGRESS)
+		return NULL;
+
+	sdmac->status = DMA_IN_PROGRESS;
+
+	sdmac->flags |= IMX_DMA_SG_LOOP;
+	sdmac->direction = direction;
+	ret = sdma_load_context(sdmac);
+	if (ret)
+		goto err_out;
+
+	if (num_periods > NUM_BD) {
+		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+				channel, num_periods, NUM_BD);
+		goto err_out;
+	}
+
+	if (period_len > 0xffff) {
+		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
+				channel, period_len, 0xffff);
+		goto err_out;
+	}
+
+	while (buf < buf_len) {
+		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		int param;
+
+		bd->buffer_addr = dma_addr;
+
+		bd->mode.count = period_len;
+
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+			goto err_out;
+		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+			bd->mode.command = 0;
+		else
+			bd->mode.command = sdmac->word_size;
+
+		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
+		if (i + 1 == num_periods)
+			param |= BD_WRAP;
+
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+				i, period_len, dma_addr,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+
+		dma_addr += period_len;
+		buf += period_len;
+
+		i++;
+	}
+
+	sdmac->num_bd = num_periods;
+	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+	return &sdmac->desc;
+err_out:
+	sdmac->status = DMA_ERROR;
+	return NULL;
+}
+
+static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct dma_slave_config *dmaengine_cfg = (void *)arg;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		sdma_disable_channel(sdmac);
+		return 0;
+	case DMA_SLAVE_CONFIG:
+		if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+			sdmac->per_address = dmaengine_cfg->src_addr;
+			sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+			sdmac->word_size = dmaengine_cfg->src_addr_width;
+		} else {
+			sdmac->per_address = dmaengine_cfg->dst_addr;
+			sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+			sdmac->word_size = dmaengine_cfg->dst_addr_width;
+		}
+		return sdma_config_channel(sdmac);
+	default:
+		return -ENOSYS;
+	}
+
+	return -EINVAL;
+}
+
+static enum dma_status sdma_tx_status(struct dma_chan *chan,
+				      dma_cookie_t cookie,
+				      struct dma_tx_state *txstate)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	dma_cookie_t last_used;
+	enum dma_status ret;
+
+	last_used = chan->cookie;
+
+	ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
+	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+
+	return ret;
+}
+
+static void sdma_issue_pending(struct dma_chan *chan)
+{
+	/*
+	 * Nothing to do. We only have a single descriptor
+	 */
+}
+
+static int __init sdma_init(struct sdma_engine *sdma,
+		void *ram_code, int ram_code_size)
+{
+	int i, ret;
+	dma_addr_t ccb_phys;
+
+	switch (sdma->version) {
+	case 1:
+		sdma->num_events = 32;
+		break;
+	case 2:
+		sdma->num_events = 48;
+		break;
+	default:
+		dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+		return -ENODEV;
+	}
+
+	clk_enable(sdma->clk);
+
+	/* Be sure SDMA has not started yet */
+	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+
+	sdma->channel_control = dma_alloc_coherent(NULL,
+			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+			sizeof(struct sdma_context_data),
+			&ccb_phys, GFP_KERNEL);
+
+	if (!sdma->channel_control) {
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	sdma->context = (void *)sdma->channel_control +
+		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+	sdma->context_phys = ccb_phys +
+		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+
+	/* Zero-out the CCB structures array just allocated */
+	memset(sdma->channel_control, 0,
+			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
+
+	/* disable all channels */
+	for (i = 0; i < sdma->num_events; i++)
+		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+
+	/* All channels have priority 0 */
+	for (i = 0; i < MAX_DMA_CHANNELS; i++)
+		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+
+	ret = sdma_request_channel(&sdma->channel[0]);
+	if (ret)
+		goto err_dma_alloc;
+
+	sdma_config_ownership(&sdma->channel[0], false, true, false);
+
+	/* Set Command Channel (Channel Zero) */
+	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+
+	/* Set bits of CONFIG register but with static context switching */
+	/* FIXME: Check whether to set ACR bit depending on clock ratios */
+	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+
+	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+
+	/* download the RAM image for SDMA */
+	sdma_load_script(sdma, ram_code,
+			ram_code_size,
+			sdma->script_addrs->ram_code_start_addr);
+
+	/* Set bits of CONFIG register with given context switching mode */
+	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
+	/* Initializes channel's priorities */
+	sdma_set_channel_priority(&sdma->channel[0], 7);
+
+	clk_disable(sdma->clk);
+
+	return 0;
+
+err_dma_alloc:
+	clk_disable(sdma->clk);
+	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
+	return ret;
+}
+
+static int __init sdma_probe(struct platform_device *pdev)
+{
+	int ret;
+	const struct firmware *fw;
+	const struct sdma_firmware_header *header;
+	const struct sdma_script_start_addrs *addr;
+	int irq;
+	unsigned short *ram_code;
+	struct resource *iores;
+	struct sdma_platform_data *pdata = pdev->dev.platform_data;
+	char *fwname;
+	int i;
+	dma_cap_mask_t mask;
+	struct sdma_engine *sdma;
+
+	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+	if (!sdma)
+		return -ENOMEM;
+
+	sdma->dev = &pdev->dev;
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!iores || irq < 0 || !pdata) {
+		ret = -EINVAL;
+		goto err_irq;
+	}
+
+	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
+		ret = -EBUSY;
+		goto err_request_region;
+	}
+
+	sdma->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(sdma->clk)) {
+		ret = PTR_ERR(sdma->clk);
+		goto err_clk;
+	}
+
+	sdma->regs = ioremap(iores->start, resource_size(iores));
+	if (!sdma->regs) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+	if (ret)
+		goto err_request_irq;
+
+	fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin",
+			pdata->cpu_name, pdata->to_version);
+	if (!fwname) {
+		ret = -ENOMEM;
+		goto err_cputype;
+	}
+
+	ret = request_firmware(&fw, fwname, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n",
+				fwname, ret);
+		kfree(fwname);
+		goto err_cputype;
+	}
+	kfree(fwname);
+
+	if (fw->size < sizeof(*header))
+		goto err_firmware;
+
+	header = (struct sdma_firmware_header *)fw->data;
+
+	if (header->magic != SDMA_FIRMWARE_MAGIC)
+		goto err_firmware;
+	if (header->ram_code_start + header->ram_code_size > fw->size)
+		goto err_firmware;
+
+	addr = (void *)header + header->script_addrs_start;
+	ram_code = (void *)header + header->ram_code_start;
+	sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL);
+	if (!sdma->script_addrs)
+		goto err_firmware;
+	memcpy(sdma->script_addrs, addr, sizeof(*addr));
+
+	sdma->version = pdata->sdma_version;
+
+	INIT_LIST_HEAD(&sdma->dma_device.channels);
+	/* Initialize channel parameters */
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		struct sdma_channel *sdmac = &sdma->channel[i];
+
+		sdmac->sdma = sdma;
+		spin_lock_init(&sdmac->lock);
+
+		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
+		sdmac->chan.device = &sdma->dma_device;
+		sdmac->chan.chan_id = i;
+		sdmac->channel = i;
+
+		/* Add the channel to the DMAC list */
+		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+	}
+
+	ret = sdma_init(sdma, ram_code, header->ram_code_size);
+	if (ret)
+		goto err_init;
+
+	sdma->dma_device.dev = &pdev->dev;
+
+	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
+	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
+	sdma->dma_device.device_tx_status = sdma_tx_status;
+	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
+	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+	sdma->dma_device.device_control = sdma_control;
+	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+
+	ret = dma_async_device_register(&sdma->dma_device);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to register\n");
+		goto err_init;
+	}
+
+	dev_info(&pdev->dev, "initialized (firmware %d.%d)\n",
+			header->version_major,
+			header->version_minor);
+
+	/* request channel 0. This is an internal control channel
+	 * to the SDMA engine and not available to clients.
+	 */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_request_channel(mask, NULL, NULL);
+
+	release_firmware(fw);
+
+	return 0;
+
+err_init:
+	kfree(sdma->script_addrs);
+err_firmware:
+	release_firmware(fw);
+err_cputype:
+	free_irq(irq, sdma);
+err_request_irq:
+	iounmap(sdma->regs);
+err_ioremap:
+	clk_put(sdma->clk);
+err_clk:
+	release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+err_irq:
+	kfree(sdma);
+	return ret;
+}
+
+static int __exit sdma_remove(struct platform_device *pdev)
+{
+	return -EBUSY;
+}
+
+static struct platform_driver sdma_driver = {
+	.driver		= {
+		.name	= "imx-sdma",
+	},
+	.remove		= __exit_p(sdma_remove),
+};
+
+static int __init sdma_module_init(void)
+{
+	return platform_driver_probe(&sdma_driver, sdma_probe);
+}
+subsys_initcall(sdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX SDMA driver");
+MODULE_LICENSE("GPL");