Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig              |   24
-rw-r--r--  drivers/dma/Makefile             |    3
-rw-r--r--  drivers/dma/amba-pl08x.c         | 2167
-rw-r--r--  drivers/dma/dmaengine.c          |    2
-rw-r--r--  drivers/dma/imx-dma.c            |  422
-rw-r--r--  drivers/dma/imx-sdma.c           | 1392
-rw-r--r--  drivers/dma/intel_mid_dma.c      |  476
-rw-r--r--  drivers/dma/intel_mid_dma_regs.h |   53
-rw-r--r--  drivers/dma/ste_dma40.c          |  958
-rw-r--r--  drivers/dma/ste_dma40_ll.c       |  172
-rw-r--r--  drivers/dma/ste_dma40_ll.h       |   79
11 files changed, 5044 insertions, 704 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..ab28f6093414 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -49,6 +49,14 @@ config INTEL_MID_DMAC
 config ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	bool
 
+config AMBA_PL08X
+	bool "ARM PrimeCell PL080 or PL081 support"
+	depends on ARM_AMBA && EXPERIMENTAL
+	select DMA_ENGINE
+	help
+	  Platform has a PL08x DMAC device
+	  which can provide DMA engine support
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
@@ -195,6 +203,22 @@ config PCH_DMA
 	help
 	  Enable support for the Topcliff PCH DMA engine.
 
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..a8a84f4587f2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
new file mode 100644
index 000000000000..b605cc9ac3a2
--- /dev/null
+++ b/drivers/dma/amba-pl08x.c
@@ -0,0 +1,2167 @@
1/*
2 * Copyright (c) 2006 ARM Ltd.
3 * Copyright (c) 2010 ST-Ericsson SA
4 *
5 * Author: Peter Pearse <peter.pearse@arm.com>
6 * Author: Linus Walleij <linus.walleij@stericsson.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * The full GNU General Public License is in this distribution in the
23 * file called COPYING.
24 *
25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081
27 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
29 * any channel.
30 *
31 * The PL080 has 8 channels available for simultaneous use, and the PL081
32 * has only two channels. So on these DMA controllers the number of channels
33 * and the number of incoming DMA signals are two totally different things.
34 * It is usually not possible to theoretically handle all physical signals,
35 * so a multiplexing scheme with possible denial of use is necessary.
36 *
37 * The PL080 has a dual bus master, PL081 has a single master.
38 *
39 * Memory to peripheral transfer may be visualized as
40 * Get data from memory to DMAC
41 * Until no data left
42 * On burst request from peripheral
43 * Destination burst from DMAC to peripheral
44 * Clear burst request
45 * Raise terminal count interrupt
46 *
47 * For peripherals with a FIFO:
48 * Source burst size == half the depth of the peripheral FIFO
49 * Destination burst size == the depth of the peripheral FIFO
50 *
51 * (Bursts are irrelevant for mem to mem transfers - there are no burst
52 * signals, the DMA controller will simply facilitate its AHB master.)
53 *
54 * ASSUMES default (little) endianness for DMA transfers
55 *
56 * Only DMAC flow control is implemented
57 *
58 * Global TODO:
59 * - Break out common code from arch/arm/mach-s3c64xx and share
60 */
61#include <linux/device.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/pci.h>
65#include <linux/interrupt.h>
66#include <linux/slab.h>
67#include <linux/dmapool.h>
68#include <linux/amba/bus.h>
69#include <linux/dmaengine.h>
70#include <linux/amba/pl08x.h>
71#include <linux/debugfs.h>
72#include <linux/seq_file.h>
73
74#include <asm/hardware/pl080.h>
75#include <asm/dma.h>
76#include <asm/mach/dma.h>
77#include <asm/atomic.h>
78#include <asm/processor.h>
79#include <asm/cacheflush.h>
80
81#define DRIVER_NAME "pl08xdmac"
82
83/**
84 * struct vendor_data - vendor-specific config parameters
85 * for PL08x derivatives
86 * @name: the name of this specific variant
87 * @channels: the number of channels available in this variant
88 * @dualmaster: whether this version supports dual AHB masters
89 * or not.
90 */
91struct vendor_data {
92 char *name;
93 u8 channels;
94 bool dualmaster;
95};
96
97/*
98 * PL08X private data structures
99 * An LLI struct - see pl08x TRM
100 * Note that next uses bit[0] as a bus bit,
101 * start & end do not - their bus bit info
102 * is in cctl
103 */
104struct lli {
105 dma_addr_t src;
106 dma_addr_t dst;
107 dma_addr_t next;
108 u32 cctl;
109};
110
111/**
112 * struct pl08x_driver_data - the local state holder for the PL08x
113 * @slave: slave engine for this instance
114 * @memcpy: memcpy engine for this instance
115 * @base: virtual memory base (remapped) for the PL08x
116 * @adev: the corresponding AMBA (PrimeCell) bus entry
117 * @vd: vendor data for this PL08x variant
118 * @pd: platform data passed in from the platform/machine
119 * @phy_chans: array of data for the physical channels
120 * @pool: a pool for the LLI descriptors
121 * @pool_ctr: counter of LLIs in the pool
122 * @lock: a spinlock for this struct
123 */
124struct pl08x_driver_data {
125 struct dma_device slave;
126 struct dma_device memcpy;
127 void __iomem *base;
128 struct amba_device *adev;
129 struct vendor_data *vd;
130 struct pl08x_platform_data *pd;
131 struct pl08x_phy_chan *phy_chans;
132 struct dma_pool *pool;
133 int pool_ctr;
134 spinlock_t lock;
135};
136
137/*
138 * PL08X specific defines
139 */
140
141/*
142 * Memory boundaries: the manual for PL08x says that the controller
143 * cannot read past a 1KiB boundary, so these defines are used to
144 * create transfer LLIs that do not cross such boundaries.
145 */
146#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
147#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
148
149/* Minimum period between work queue runs */
150#define PL08X_WQ_PERIODMIN 20
151
152/* Size (bytes) of each LLI buffer allocated for one transfer */
153# define PL08X_LLI_TSFR_SIZE 0x2000
154
155/* Maximum number of times we call dma_pool_alloc on this pool without freeing */
156#define PL08X_MAX_ALLOCS 0x40
157#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
158#define PL08X_ALIGN 8
159
160static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
161{
162 return container_of(chan, struct pl08x_dma_chan, chan);
163}
164
165/*
166 * Physical channel handling
167 */
168
169/* Whether a certain channel is busy or not */
170static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
171{
172 unsigned int val;
173
174 val = readl(ch->base + PL080_CH_CONFIG);
175 return val & PL080_CONFIG_ACTIVE;
176}
177
178/*
179 * Set the initial DMA register values i.e. those for the first LLI
180 * The next lli pointer and the configuration interrupt bit have
181 * been set when the LLIs were constructed
182 */
183static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
184 struct pl08x_phy_chan *ch)
185{
186 /* Wait for channel inactive */
187 while (pl08x_phy_channel_busy(ch))
188 ;
189
190 dev_vdbg(&pl08x->adev->dev,
191 "WRITE channel %d: csrc=%08x, cdst=%08x, "
192 "cctl=%08x, clli=%08x, ccfg=%08x\n",
193 ch->id,
194 ch->csrc,
195 ch->cdst,
196 ch->cctl,
197 ch->clli,
198 ch->ccfg);
199
200 writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
201 writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
202 writel(ch->clli, ch->base + PL080_CH_LLI);
203 writel(ch->cctl, ch->base + PL080_CH_CONTROL);
204 writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
205}
206
207static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
208{
209 struct pl08x_channel_data *cd = plchan->cd;
210 struct pl08x_phy_chan *phychan = plchan->phychan;
211 struct pl08x_txd *txd = plchan->at;
212
213 /* Copy the basic control register calculated at transfer config */
214 phychan->csrc = txd->csrc;
215 phychan->cdst = txd->cdst;
216 phychan->clli = txd->clli;
217 phychan->cctl = txd->cctl;
218
219 /* Assign the signal to the proper control registers */
220 phychan->ccfg = cd->ccfg;
221 phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
222 phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
223 /* If it wasn't set from AMBA, ignore it */
224 if (txd->direction == DMA_TO_DEVICE)
225 /* Select signal as destination */
226 phychan->ccfg |=
227 (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
228 else if (txd->direction == DMA_FROM_DEVICE)
229 /* Select signal as source */
230 phychan->ccfg |=
231 (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
232 /* Always enable error interrupts */
233 phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
234 /* Always enable terminal interrupts */
235 phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
236}
237
238/*
239 * Enable the DMA channel
240 * Assumes all other configuration bits have been set
241 * as desired before this code is called
242 */
243static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
244 struct pl08x_phy_chan *ch)
245{
246 u32 val;
247
248 /*
249 * Do not access config register until channel shows as disabled
250 */
251 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
252 ;
253
254 /*
255 * Do not access config register until channel shows as inactive
256 */
257 val = readl(ch->base + PL080_CH_CONFIG);
258 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
259 val = readl(ch->base + PL080_CH_CONFIG);
260
261 writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
262}
263
264/*
265 * Overall DMAC remains enabled always.
266 *
267 * Disabling individual channels could lose data.
268 *
269 * Disable the peripheral DMA after disabling the DMAC
270 * in order to allow the DMAC FIFO to drain, and
271 * hence allow the channel to show inactive
272 *
273 */
274static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
275{
276 u32 val;
277
278 /* Set the HALT bit and wait for the FIFO to drain */
279 val = readl(ch->base + PL080_CH_CONFIG);
280 val |= PL080_CONFIG_HALT;
281 writel(val, ch->base + PL080_CH_CONFIG);
282
283 /* Wait for channel inactive */
284 while (pl08x_phy_channel_busy(ch))
285 ;
286}
287
288static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
289{
290 u32 val;
291
292 /* Clear the HALT bit */
293 val = readl(ch->base + PL080_CH_CONFIG);
294 val &= ~PL080_CONFIG_HALT;
295 writel(val, ch->base + PL080_CH_CONFIG);
296}
297
298
299/* Stops the channel */
300static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
301{
302 u32 val;
303
304 pl08x_pause_phy_chan(ch);
305
306 /* Disable channel */
307 val = readl(ch->base + PL080_CH_CONFIG);
308 val &= ~PL080_CONFIG_ENABLE;
309 val &= ~PL080_CONFIG_ERR_IRQ_MASK;
310 val &= ~PL080_CONFIG_TC_IRQ_MASK;
311 writel(val, ch->base + PL080_CH_CONFIG);
312}
313
314static inline u32 get_bytes_in_cctl(u32 cctl)
315{
316 /* The source width defines the number of bytes */
317 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
318
319 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
320 case PL080_WIDTH_8BIT:
321 break;
322 case PL080_WIDTH_16BIT:
323 bytes *= 2;
324 break;
325 case PL080_WIDTH_32BIT:
326 bytes *= 4;
327 break;
328 }
329 return bytes;
330}
331
332/* The channel should be paused when calling this */
333static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
334{
335 struct pl08x_phy_chan *ch;
336 struct pl08x_txd *txdi = NULL;
337 struct pl08x_txd *txd;
338 unsigned long flags;
339 u32 bytes = 0;
340
341 spin_lock_irqsave(&plchan->lock, flags);
342
343 ch = plchan->phychan;
344 txd = plchan->at;
345
346 /*
347 * Next follow the LLIs to get the number of pending bytes in the
348 * currently active transaction.
349 */
350 if (ch && txd) {
351 struct lli *llis_va = txd->llis_va;
352 struct lli *llis_bus = (struct lli *) txd->llis_bus;
353 u32 clli = readl(ch->base + PL080_CH_LLI);
354
355 /* First get the bytes in the current active LLI */
356 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
357
358 if (clli) {
359 int i = 0;
360
361 /* Forward to the LLI pointed to by clli */
362 while ((clli != (u32) &(llis_bus[i])) &&
363 (i < MAX_NUM_TSFR_LLIS))
364 i++;
365
366 while (clli) {
367 bytes += get_bytes_in_cctl(llis_va[i].cctl);
368 /*
369 * A clli of 0x00000000 will terminate the
370 * LLI list
371 */
372 clli = llis_va[i].next;
373 i++;
374 }
375 }
376 }
377
378 /* Sum up all queued transactions */
379 if (!list_empty(&plchan->desc_list)) {
380 list_for_each_entry(txdi, &plchan->desc_list, node) {
381 bytes += txdi->len;
382 }
383
384 }
385
386 spin_unlock_irqrestore(&plchan->lock, flags);
387
388 return bytes;
389}
390
391/*
392 * Allocate a physical channel for a virtual channel
393 */
394static struct pl08x_phy_chan *
395pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
396 struct pl08x_dma_chan *virt_chan)
397{
398 struct pl08x_phy_chan *ch = NULL;
399 unsigned long flags;
400 int i;
401
402 /*
403 * Try to locate a physical channel to be used for
404 * this transfer. If all are taken return NULL and
405 * the requester will have to cope by using some fallback
406 * PIO mode or retrying later.
407 */
408 for (i = 0; i < pl08x->vd->channels; i++) {
409 ch = &pl08x->phy_chans[i];
410
411 spin_lock_irqsave(&ch->lock, flags);
412
413 if (!ch->serving) {
414 ch->serving = virt_chan;
415 ch->signal = -1;
416 spin_unlock_irqrestore(&ch->lock, flags);
417 break;
418 }
419
420 spin_unlock_irqrestore(&ch->lock, flags);
421 }
422
423 if (i == pl08x->vd->channels) {
424 /* No physical channel available, cope with it */
425 return NULL;
426 }
427
428 return ch;
429}
430
431static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
432 struct pl08x_phy_chan *ch)
433{
434 unsigned long flags;
435
436 /* Stop the channel and clear its interrupts */
437 pl08x_stop_phy_chan(ch);
438 writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
439 writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
440
441 /* Mark it as free */
442 spin_lock_irqsave(&ch->lock, flags);
443 ch->serving = NULL;
444 spin_unlock_irqrestore(&ch->lock, flags);
445}
446
447/*
448 * LLI handling
449 */
450
451static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
452{
453 switch (coded) {
454 case PL080_WIDTH_8BIT:
455 return 1;
456 case PL080_WIDTH_16BIT:
457 return 2;
458 case PL080_WIDTH_32BIT:
459 return 4;
460 default:
461 break;
462 }
463 BUG();
464 return 0;
465}
466
467static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
468 u32 tsize)
469{
470 u32 retbits = cctl;
471
472 /* Remove all src, dst and transfersize bits */
473 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
474 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
475 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
476
477 /* Then set the bits according to the parameters */
478 switch (srcwidth) {
479 case 1:
480 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
481 break;
482 case 2:
483 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
484 break;
485 case 4:
486 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
487 break;
488 default:
489 BUG();
490 break;
491 }
492
493 switch (dstwidth) {
494 case 1:
495 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
496 break;
497 case 2:
498 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
499 break;
500 case 4:
501 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
502 break;
503 default:
504 BUG();
505 break;
506 }
507
508 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
509 return retbits;
510}
511
512/*
513 * Autoselect a master bus to use for the transfer
514 * this prefers the destination bus if both available
515 * if fixed address on one bus the other will be chosen
516 */
517void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
518 struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
519 struct pl08x_bus_data **sbus, u32 cctl)
520{
521 if (!(cctl & PL080_CONTROL_DST_INCR)) {
522 *mbus = src_bus;
523 *sbus = dst_bus;
524 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
525 *mbus = dst_bus;
526 *sbus = src_bus;
527 } else {
528 if (dst_bus->buswidth == 4) {
529 *mbus = dst_bus;
530 *sbus = src_bus;
531 } else if (src_bus->buswidth == 4) {
532 *mbus = src_bus;
533 *sbus = dst_bus;
534 } else if (dst_bus->buswidth == 2) {
535 *mbus = dst_bus;
536 *sbus = src_bus;
537 } else if (src_bus->buswidth == 2) {
538 *mbus = src_bus;
539 *sbus = dst_bus;
540 } else {
541 /* src_bus->buswidth == 1 */
542 *mbus = dst_bus;
543 *sbus = src_bus;
544 }
545 }
546}
547
548/*
549 * Fills in one LLI for a certain transfer descriptor
550 * and advance the counter
551 */
552int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
553 struct pl08x_txd *txd, int num_llis, int len,
554 u32 cctl, u32 *remainder)
555{
556 struct lli *llis_va = txd->llis_va;
557 struct lli *llis_bus = (struct lli *) txd->llis_bus;
558
559 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
560
561 llis_va[num_llis].cctl = cctl;
562 llis_va[num_llis].src = txd->srcbus.addr;
563 llis_va[num_llis].dst = txd->dstbus.addr;
564
565 /*
566 * On versions with dual masters, you can optionally AND on
567 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
568 * in new LLIs with that controller, but we always try to
569 * choose AHB1 to point into memory. The idea is to have AHB2
570 * fixed on the peripheral and AHB1 messing around in the
571 * memory. So we don't manipulate this bit currently.
572 */
573
574 llis_va[num_llis].next =
575 (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
576
577 if (cctl & PL080_CONTROL_SRC_INCR)
578 txd->srcbus.addr += len;
579 if (cctl & PL080_CONTROL_DST_INCR)
580 txd->dstbus.addr += len;
581
582 *remainder -= len;
583
584 return num_llis + 1;
585}
586
587/*
588 * Return number of bytes to fill to boundary, or len
589 */
590static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
591{
592 u32 boundary;
593
594 boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
595 << PL08X_BOUNDARY_SHIFT;
596
597 if (boundary < addr + len)
598 return boundary - addr;
599 else
600 return len;
601}
602
603/*
604 * This fills in the table of LLIs for the transfer descriptor
605 * Note that we assume we never have to change the burst sizes
606 * Return 0 for error
607 */
608static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
609 struct pl08x_txd *txd)
610{
611 struct pl08x_channel_data *cd = txd->cd;
612 struct pl08x_bus_data *mbus, *sbus;
613 u32 remainder;
614 int num_llis = 0;
615 u32 cctl;
616 int max_bytes_per_lli;
617 int total_bytes = 0;
618 struct lli *llis_va;
619 struct lli *llis_bus;
620
621 if (!txd) {
622 dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
623 return 0;
624 }
625
626 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
627 &txd->llis_bus);
628 if (!txd->llis_va) {
629 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
630 return 0;
631 }
632
633 pl08x->pool_ctr++;
634
635 /*
636 * Initialize bus values for this transfer
637 * from the passed optimal values
638 */
639 if (!cd) {
640 dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
641 return 0;
642 }
643
644 /* Get the default CCTL from the platform data */
645 cctl = cd->cctl;
646
647 /*
648 * On the PL080 we have two bus masters and we
649 * should select one for source and one for
650 * destination. We try to use AHB2 for the
651 * bus which does not increment (typically the
652 * peripheral) else we just choose something.
653 */
654 cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
655 if (pl08x->vd->dualmaster) {
656 if (cctl & PL080_CONTROL_SRC_INCR)
657 /* Source increments, use AHB2 for destination */
658 cctl |= PL080_CONTROL_DST_AHB2;
659 else if (cctl & PL080_CONTROL_DST_INCR)
660 /* Destination increments, use AHB2 for source */
661 cctl |= PL080_CONTROL_SRC_AHB2;
662 else
663 /* Just pick something, source AHB1 dest AHB2 */
664 cctl |= PL080_CONTROL_DST_AHB2;
665 }
666
667 /* Find maximum width of the source bus */
668 txd->srcbus.maxwidth =
669 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
670 PL080_CONTROL_SWIDTH_SHIFT);
671
672 /* Find maximum width of the destination bus */
673 txd->dstbus.maxwidth =
674 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
675 PL080_CONTROL_DWIDTH_SHIFT);
676
677 /* Set up the bus widths to the maximum */
678 txd->srcbus.buswidth = txd->srcbus.maxwidth;
679 txd->dstbus.buswidth = txd->dstbus.maxwidth;
680 dev_vdbg(&pl08x->adev->dev,
681 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
682 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
683
684
685 /*
686 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
687 */
688 max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
689 PL080_CONTROL_TRANSFER_SIZE_MASK;
690 dev_vdbg(&pl08x->adev->dev,
691 "%s max bytes per lli = %d\n",
692 __func__, max_bytes_per_lli);
693
694 /* We need to count this down to zero */
695 remainder = txd->len;
696 dev_vdbg(&pl08x->adev->dev,
697 "%s remainder = %d\n",
698 __func__, remainder);
699
700 /*
701 * Choose bus to align to
702 * - prefers destination bus if both available
703 * - if fixed address on one bus chooses other
704 * - modifies cctl to choose an apropriate master
705 */
706 pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
707 &mbus, &sbus, cctl);
708
709
710 /*
711 * The lowest bit of the LLI register
712 * is also used to indicate which master to
713 * use for reading the LLIs.
714 */
715
716 if (txd->len < mbus->buswidth) {
717 /*
718 * Less than a bus width available
719 * - send as single bytes
720 */
721 while (remainder) {
722 dev_vdbg(&pl08x->adev->dev,
723 "%s single byte LLIs for a transfer of "
724 "less than a bus width (remain %08x)\n",
725 __func__, remainder);
726 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
727 num_llis =
728 pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
729 cctl, &remainder);
730 total_bytes++;
731 }
732 } else {
733 /*
734 * Make one byte LLIs until master bus is aligned
735 * - slave will then be aligned also
736 */
737 while ((mbus->addr) % (mbus->buswidth)) {
738 dev_vdbg(&pl08x->adev->dev,
739 "%s adjustment lli for less than bus width "
740 "(remain %08x)\n",
741 __func__, remainder);
742 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
743 num_llis = pl08x_fill_lli_for_desc
744 (pl08x, txd, num_llis, 1, cctl, &remainder);
745 total_bytes++;
746 }
747
748 /*
749 * Master now aligned
750 * - if slave is not then we must set its width down
751 */
752 if (sbus->addr % sbus->buswidth) {
753 dev_dbg(&pl08x->adev->dev,
754 "%s set down bus width to one byte\n",
755 __func__);
756
757 sbus->buswidth = 1;
758 }
759
760 /*
761 * Make largest possible LLIs until less than one bus
762 * width left
763 */
764 while (remainder > (mbus->buswidth - 1)) {
765 int lli_len, target_len;
766 int tsize;
767 int odd_bytes;
768
769 /*
770 * If enough left try to send max possible,
771 * otherwise try to send the remainder
772 */
773 target_len = remainder;
774 if (remainder > max_bytes_per_lli)
775 target_len = max_bytes_per_lli;
776
777 /*
778 * Set bus lengths for incrementing busses
779 * to number of bytes which fill to next memory
780 * boundary
781 */
782 if (cctl & PL080_CONTROL_SRC_INCR)
783 txd->srcbus.fill_bytes =
784 pl08x_pre_boundary(
785 txd->srcbus.addr,
786 remainder);
787 else
788 txd->srcbus.fill_bytes =
789 max_bytes_per_lli;
790
791 if (cctl & PL080_CONTROL_DST_INCR)
792 txd->dstbus.fill_bytes =
793 pl08x_pre_boundary(
794 txd->dstbus.addr,
795 remainder);
796 else
797 txd->dstbus.fill_bytes =
798 max_bytes_per_lli;
799
800 /*
801 * Find the nearest
802 */
803 lli_len = min(txd->srcbus.fill_bytes,
804 txd->dstbus.fill_bytes);
805
806 BUG_ON(lli_len > remainder);
807
808 if (lli_len <= 0) {
809 dev_err(&pl08x->adev->dev,
810 "%s lli_len is %d, <= 0\n",
811 __func__, lli_len);
812 return 0;
813 }
814
815 if (lli_len == target_len) {
816 /*
817 * Can send what we wanted
818 */
819 /*
820 * Maintain alignment
821 */
822 lli_len = (lli_len/mbus->buswidth) *
823 mbus->buswidth;
824 odd_bytes = 0;
825 } else {
826 /*
827 * So now we know how many bytes to transfer
828 * to get to the nearest boundary
829 * The next lli will go past the boundary
830 * - however we may be working to a boundary
831 * on the slave bus
832 * We need to ensure the master stays aligned
833 */
834 odd_bytes = lli_len % mbus->buswidth;
835 /*
836 * - and that we are working in multiples
837 * of the bus widths
838 */
839 lli_len -= odd_bytes;
840
841 }
842
843 if (lli_len) {
844 /*
845 * Check against minimum bus alignment:
846 * Calculate actual transfer size in relation
847 * to bus width and get a maximum remainder of
848 * the smallest bus width - 1
849 */
850 /* FIXME: use round_down()? */
851 tsize = lli_len / min(mbus->buswidth,
852 sbus->buswidth);
853 lli_len = tsize * min(mbus->buswidth,
854 sbus->buswidth);
855
856 if (target_len != lli_len) {
857 dev_vdbg(&pl08x->adev->dev,
858 "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
859 __func__, target_len, lli_len, txd->len);
860 }
861
862 cctl = pl08x_cctl_bits(cctl,
863 txd->srcbus.buswidth,
864 txd->dstbus.buswidth,
865 tsize);
866
867 dev_vdbg(&pl08x->adev->dev,
868 "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
869 __func__, lli_len, remainder);
870 num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
871 num_llis, lli_len, cctl,
872 &remainder);
873 total_bytes += lli_len;
874 }
875
876
877 if (odd_bytes) {
878 /*
879 * Creep past the boundary,
880 * maintaining master alignment
881 */
882 int j;
883 for (j = 0; (j < mbus->buswidth)
884 && (remainder); j++) {
885 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
886 dev_vdbg(&pl08x->adev->dev,
887 "%s align with boundardy, single byte (remain %08x)\n",
888 __func__, remainder);
889 num_llis =
890 pl08x_fill_lli_for_desc(pl08x,
891 txd, num_llis, 1,
892 cctl, &remainder);
893 total_bytes++;
894 }
895 }
896 }
897
898 /*
899 * Send any odd bytes
900 */
901 if (remainder < 0) {
902 dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
903 __func__, remainder);
904 return 0;
905 }
906
907 while (remainder) {
908 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
909 dev_vdbg(&pl08x->adev->dev,
910 "%s align with boundardy, single odd byte (remain %d)\n",
911 __func__, remainder);
912 num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
913 1, cctl, &remainder);
914 total_bytes++;
915 }
916 }
917 if (total_bytes != txd->len) {
918 dev_err(&pl08x->adev->dev,
919 "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
920 __func__, total_bytes, txd->len);
921 return 0;
922 }
923
924 if (num_llis >= MAX_NUM_TSFR_LLIS) {
925 dev_err(&pl08x->adev->dev,
926 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
927 __func__, (u32) MAX_NUM_TSFR_LLIS);
928 return 0;
929 }
930 /*
931 * Decide whether this is a loop or a terminated transfer
932 */
933 llis_va = txd->llis_va;
934 llis_bus = (struct lli *) txd->llis_bus;
935
936 if (cd->circular_buffer) {
937 /*
938 * Loop the circular buffer so that the next element
939 * points back to the beginning of the LLI.
940 */
941 llis_va[num_llis - 1].next =
942 (dma_addr_t)((unsigned int)&(llis_bus[0]));
943 } else {
944 /*
945 * On non-circular buffers, the final LLI terminates
946 * the LLI.
947 */
948 llis_va[num_llis - 1].next = 0;
949 /*
950 * The final LLI element shall also fire an interrupt
951 */
952 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
953 }
954
955 /* Now store the channel register values */
956 txd->csrc = llis_va[0].src;
957 txd->cdst = llis_va[0].dst;
958 if (num_llis > 1)
959 txd->clli = llis_va[0].next;
960 else
961 txd->clli = 0;
962
963 txd->cctl = llis_va[0].cctl;
964 /* ccfg will be set at physical channel allocation time */
965
966#ifdef VERBOSE_DEBUG
967 {
968 int i;
969
970 for (i = 0; i < num_llis; i++) {
971 dev_vdbg(&pl08x->adev->dev,
972 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
973 i,
974 &llis_va[i],
975 llis_va[i].src,
976 llis_va[i].dst,
977 llis_va[i].cctl,
978 llis_va[i].next
979 );
980 }
981 }
982#endif
983
984 return num_llis;
985}
986
987/* You should call this with the struct pl08x lock held */
988static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
989 struct pl08x_txd *txd)
990{
991 if (!txd)
992 dev_err(&pl08x->adev->dev,
993 "%s no descriptor to free\n",
994 __func__);
995
996 /* Free the LLI */
997 dma_pool_free(pl08x->pool, txd->llis_va,
998 txd->llis_bus);
999
1000 pl08x->pool_ctr--;
1001
1002 kfree(txd);
1003}
1004
1005static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1006 struct pl08x_dma_chan *plchan)
1007{
1008 struct pl08x_txd *txdi = NULL;
1009 struct pl08x_txd *next;
1010
1011 if (!list_empty(&plchan->desc_list)) {
1012 list_for_each_entry_safe(txdi,
1013 next, &plchan->desc_list, node) {
1014 list_del(&txdi->node);
1015 pl08x_free_txd(pl08x, txdi);
1016 }
1017
1018 }
1019}
1020
1021/*
1022 * The DMA ENGINE API
1023 */
1024static int pl08x_alloc_chan_resources(struct dma_chan *chan)
1025{
1026 return 0;
1027}
1028
1029static void pl08x_free_chan_resources(struct dma_chan *chan)
1030{
1031}
1032
1033/*
1034 * This should be called with the channel plchan->lock held
1035 */
1036static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1037 struct pl08x_txd *txd)
1038{
1039 struct pl08x_driver_data *pl08x = plchan->host;
1040 struct pl08x_phy_chan *ch;
1041 int ret;
1042
1043 /* Check if we already have a channel */
1044 if (plchan->phychan)
1045 return 0;
1046
1047 ch = pl08x_get_phy_channel(pl08x, plchan);
1048 if (!ch) {
1049 /* No physical channel available, cope with it */
1050 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
1051 return -EBUSY;
1052 }
1053
1054 /*
1055 * OK we have a physical channel: for memcpy() this is all we
1056 * need, but for slaves the physical signals may be muxed!
1057 * Can the platform allow us to use this channel?
1058 */
1059 if (plchan->slave &&
1060 ch->signal < 0 &&
1061 pl08x->pd->get_signal) {
1062 ret = pl08x->pd->get_signal(plchan);
1063 if (ret < 0) {
1064 dev_dbg(&pl08x->adev->dev,
1065 "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
1066 ch->id, plchan->name);
1067 /* Release physical channel & return */
1068 pl08x_put_phy_channel(pl08x, ch);
1069 return -EBUSY;
1070 }
1071 ch->signal = ret;
1072 }
1073
1074 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
1075 ch->id,
1076 ch->signal,
1077 plchan->name);
1078
1079 plchan->phychan = ch;
1080
1081 return 0;
1082}
1083
1084static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
1085{
1086 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
1087
1088 atomic_inc(&plchan->last_issued);
1089 tx->cookie = atomic_read(&plchan->last_issued);
1090 /* This unlock follows the lock in the prep() function */
1091 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
1092
1093 return tx->cookie;
1094}
1095
1096static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1097 struct dma_chan *chan, unsigned long flags)
1098{
1099 struct dma_async_tx_descriptor *retval = NULL;
1100
1101 return retval;
1102}
1103
1104/*
1105 * Code accessing dma_async_is_complete() in a tight loop
1106 * may give problems - could schedule where indicated.
1107 * If slaves are relying on interrupts to signal completion this
1108 * function must not be called with interrupts disabled
1109 */
1110static enum dma_status
1111pl08x_dma_tx_status(struct dma_chan *chan,
1112 dma_cookie_t cookie,
1113 struct dma_tx_state *txstate)
1114{
1115 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1116 dma_cookie_t last_used;
1117 dma_cookie_t last_complete;
1118 enum dma_status ret;
1119 u32 bytesleft = 0;
1120
1121 last_used = atomic_read(&plchan->last_issued);
1122 last_complete = plchan->lc;
1123
1124 ret = dma_async_is_complete(cookie, last_complete, last_used);
1125 if (ret == DMA_SUCCESS) {
1126 dma_set_tx_state(txstate, last_complete, last_used, 0);
1127 return ret;
1128 }
1129
1130 /*
1131 * schedule(); could be inserted here
1132 */
1133
1134 /*
1135 * This cookie not complete yet
1136 */
1137 last_used = atomic_read(&plchan->last_issued);
1138 last_complete = plchan->lc;
1139
1140 /* Get number of bytes left in the active transactions and queue */
1141 bytesleft = pl08x_getbytes_chan(plchan);
1142
1143 dma_set_tx_state(txstate, last_complete, last_used,
1144 bytesleft);
1145
1146 if (plchan->state == PL08X_CHAN_PAUSED)
1147 return DMA_PAUSED;
1148
1149 /* Whether waiting or running, we're in progress */
1150 return DMA_IN_PROGRESS;
1151}
1152
1153/* PrimeCell DMA extension */
1154struct burst_table {
1155 int burstwords;
1156 u32 reg;
1157};
1158
1159static const struct burst_table burst_sizes[] = {
1160 {
1161 .burstwords = 256,
1162 .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
1163 (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
1164 },
1165 {
1166 .burstwords = 128,
1167 .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
1168 (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
1169 },
1170 {
1171 .burstwords = 64,
1172 .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
1173 (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
1174 },
1175 {
1176 .burstwords = 32,
1177 .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
1178 (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
1179 },
1180 {
1181 .burstwords = 16,
1182 .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
1183 (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
1184 },
1185 {
1186 .burstwords = 8,
1187 .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
1188 (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
1189 },
1190 {
1191 .burstwords = 4,
1192 .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
1193 (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
1194 },
1195 {
1196 .burstwords = 1,
1197 .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
1198 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
1199 },
1200};
1201
1202static void dma_set_runtime_config(struct dma_chan *chan,
1203 struct dma_slave_config *config)
1204{
1205 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1206 struct pl08x_driver_data *pl08x = plchan->host;
1207 struct pl08x_channel_data *cd = plchan->cd;
1208 enum dma_slave_buswidth addr_width;
1209 u32 maxburst;
1210 u32 cctl = 0;
1211 /* Mask out all except src and dst channel */
1212 u32 ccfg = cd->ccfg & 0x000003DEU;
1213 int i = 0;
1214
1215 /* Transfer direction */
1216 plchan->runtime_direction = config->direction;
1217 if (config->direction == DMA_TO_DEVICE) {
1218 plchan->runtime_addr = config->dst_addr;
1219 cctl |= PL080_CONTROL_SRC_INCR;
1220 ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1221 addr_width = config->dst_addr_width;
1222 maxburst = config->dst_maxburst;
1223 } else if (config->direction == DMA_FROM_DEVICE) {
1224 plchan->runtime_addr = config->src_addr;
1225 cctl |= PL080_CONTROL_DST_INCR;
1226 ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1227 addr_width = config->src_addr_width;
1228 maxburst = config->src_maxburst;
1229 } else {
1230 dev_err(&pl08x->adev->dev,
1231 "bad runtime_config: alien transfer direction\n");
1232 return;
1233 }
1234
1235 switch (addr_width) {
1236 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1237 cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1238 (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
1239 break;
1240 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1241 cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1242 (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
1243 break;
1244 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1245 cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1246 (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
1247 break;
1248 default:
1249 dev_err(&pl08x->adev->dev,
1250 "bad runtime_config: alien address width\n");
1251 return;
1252 }
1253
1254 /*
1255 * Now decide on a maxburst:
1256 * If this channel will only request single transfers, set
1257 * this down to ONE element.
1258 */
1259 if (plchan->cd->single) {
1260 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
1261 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
1262 } else {
1263 while (i < ARRAY_SIZE(burst_sizes)) {
1264 if (burst_sizes[i].burstwords <= maxburst)
1265 break;
1266 i++;
1267 }
1268 cctl |= burst_sizes[i].reg;
1269 }
1270
1271 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1272 cctl &= ~PL080_CONTROL_PROT_MASK;
1273 cctl |= PL080_CONTROL_PROT_SYS;
1274
1275 /* Modify the default channel data to fit PrimeCell request */
1276 cd->cctl = cctl;
1277 cd->ccfg = ccfg;
1278
1279 dev_dbg(&pl08x->adev->dev,
1280 "configured channel %s (%s) for %s, data width %d, "
1281 "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
1282 dma_chan_name(chan), plchan->name,
1283 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
1284 addr_width,
1285 maxburst,
1286 cctl, ccfg);
1287}
1288
1289/*
1290 * Slave transactions callback to the slave device to allow
1291 * synchronization of slave DMA signals with the DMAC enable
1292 */
1293static void pl08x_issue_pending(struct dma_chan *chan)
1294{
1295 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1296 struct pl08x_driver_data *pl08x = plchan->host;
1297 unsigned long flags;
1298
1299 spin_lock_irqsave(&plchan->lock, flags);
1300 /* Something is already active */
1301 if (plchan->at) {
1302 spin_unlock_irqrestore(&plchan->lock, flags);
1303 return;
1304 }
1305
1306 /* Didn't get a physical channel so waiting for it ... */
1307 if (plchan->state == PL08X_CHAN_WAITING)
1308 return;
1309
1310 /* Take the first element in the queue and execute it */
1311 if (!list_empty(&plchan->desc_list)) {
1312 struct pl08x_txd *next;
1313
1314 next = list_first_entry(&plchan->desc_list,
1315 struct pl08x_txd,
1316 node);
1317 list_del(&next->node);
1318 plchan->at = next;
1319 plchan->state = PL08X_CHAN_RUNNING;
1320
1321 /* Configure the physical channel for the active txd */
1322 pl08x_config_phychan_for_txd(plchan);
1323 pl08x_set_cregs(pl08x, plchan->phychan);
1324 pl08x_enable_phy_chan(pl08x, plchan->phychan);
1325 }
1326
1327 spin_unlock_irqrestore(&plchan->lock, flags);
1328}
1329
1330static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1331 struct pl08x_txd *txd)
1332{
1333 int num_llis;
1334 struct pl08x_driver_data *pl08x = plchan->host;
1335 int ret;
1336
1337 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1338
1339 if (!num_llis)
1340 return -EINVAL;
1341
1342 spin_lock_irqsave(&plchan->lock, plchan->lockflags);
1343
1344 /*
1345 * If this device is not using a circular buffer then
1346 * queue this new descriptor for transfer.
1347 * The descriptor for a circular buffer continues
1348 * to be used until the channel is freed.
1349 */
1350 if (txd->cd->circular_buffer)
1351 dev_err(&pl08x->adev->dev,
1352 "%s attempting to queue a circular buffer\n",
1353 __func__);
1354 else
1355 list_add_tail(&txd->node,
1356 &plchan->desc_list);
1357
1358 /*
1359 * See if we already have a physical channel allocated,
1360 * else this is the time to try to get one.
1361 */
1362 ret = prep_phy_channel(plchan, txd);
1363 if (ret) {
1364 /*
1365 * No physical channel available, we will
1366 * stack up the memcpy channels until there is a channel
1367 * available to handle it whereas slave transfers may
1368 * have been denied due to platform channel muxing restrictions
1369 * and since there is no guarantee that this will ever be
1370 * resolved, and since the signal must be acquired AFTER
1371 * acquiring the physical channel, we will let them be NACK:ed
1372 * with -EBUSY here. The drivers can always retry the prep()
1373 * call if they are eager on doing this using DMA.
1374 */
1375 if (plchan->slave) {
1376 pl08x_free_txd_list(pl08x, plchan);
1377 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
1378 return -EBUSY;
1379 }
1380 /* Do this memcpy whenever there is a channel ready */
1381 plchan->state = PL08X_CHAN_WAITING;
1382 plchan->waiting = txd;
1383 } else
1384 /*
1385 * Else we're all set, paused and ready to roll,
1386 * status will switch to PL08X_CHAN_RUNNING when
1387 * we call issue_pending(). If there is something
1388 * running on the channel already we don't change
1389 * its state.
1390 */
1391 if (plchan->state == PL08X_CHAN_IDLE)
1392 plchan->state = PL08X_CHAN_PAUSED;
1393
1394 /*
1395 * Notice that we leave plchan->lock locked on purpose:
1396 * it will be unlocked in the subsequent tx_submit()
1397 * call. This is a consequence of the current API.
1398 */
1399
1400 return 0;
1401}
1402
1403/*
1404 * Initialize a descriptor to be used by memcpy submit
1405 */
1406static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1407 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1408 size_t len, unsigned long flags)
1409{
1410 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1411 struct pl08x_driver_data *pl08x = plchan->host;
1412 struct pl08x_txd *txd;
1413 int ret;
1414
1415 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1416 if (!txd) {
1417 dev_err(&pl08x->adev->dev,
1418 "%s no memory for descriptor\n", __func__);
1419 return NULL;
1420 }
1421
1422 dma_async_tx_descriptor_init(&txd->tx, chan);
1423 txd->direction = DMA_NONE;
1424 txd->srcbus.addr = src;
1425 txd->dstbus.addr = dest;
1426
1427 /* Set platform data for m2m */
1428 txd->cd = &pl08x->pd->memcpy_channel;
1429 /* Both to be incremented or the code will break */
1430 txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1431 txd->tx.tx_submit = pl08x_tx_submit;
1432 txd->tx.callback = NULL;
1433 txd->tx.callback_param = NULL;
1434 txd->len = len;
1435
1436 INIT_LIST_HEAD(&txd->node);
1437 ret = pl08x_prep_channel_resources(plchan, txd);
1438 if (ret)
1439 return NULL;
1440 /*
1441 * NB: the channel lock is held at this point so tx_submit()
1442 * must be called in direct succession.
1443 */
1444
1445 return &txd->tx;
1446}
1447
1448struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1449 struct dma_chan *chan, struct scatterlist *sgl,
1450 unsigned int sg_len, enum dma_data_direction direction,
1451 unsigned long flags)
1452{
1453 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1454 struct pl08x_driver_data *pl08x = plchan->host;
1455 struct pl08x_txd *txd;
1456 int ret;
1457
1458 /*
1459 * Current implementation ASSUMES only one sg
1460 */
1461 if (sg_len != 1) {
1462 dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
1463 __func__);
1464 BUG();
1465 }
1466
1467 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1468 __func__, sgl->length, plchan->name);
1469
1470 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1471 if (!txd) {
1472 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1473 return NULL;
1474 }
1475
1476 dma_async_tx_descriptor_init(&txd->tx, chan);
1477
1478 if (direction != plchan->runtime_direction)
1479 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1480 "the direction configured for the PrimeCell\n",
1481 __func__);
1482
1483 /*
1484 * Set up addresses, the PrimeCell configured address
1485 * will take precedence since this may configure the
1486 * channel target address dynamically at runtime.
1487 */
1488 txd->direction = direction;
1489 if (direction == DMA_TO_DEVICE) {
1490 txd->srcbus.addr = sgl->dma_address;
1491 if (plchan->runtime_addr)
1492 txd->dstbus.addr = plchan->runtime_addr;
1493 else
1494 txd->dstbus.addr = plchan->cd->addr;
1495 } else if (direction == DMA_FROM_DEVICE) {
1496 if (plchan->runtime_addr)
1497 txd->srcbus.addr = plchan->runtime_addr;
1498 else
1499 txd->srcbus.addr = plchan->cd->addr;
1500 txd->dstbus.addr = sgl->dma_address;
1501 } else {
1502 dev_err(&pl08x->adev->dev,
1503 "%s direction unsupported\n", __func__);
1504 return NULL;
1505 }
1506 txd->cd = plchan->cd;
1507 txd->tx.tx_submit = pl08x_tx_submit;
1508 txd->tx.callback = NULL;
1509 txd->tx.callback_param = NULL;
1510 txd->len = sgl->length;
1511 INIT_LIST_HEAD(&txd->node);
1512
1513 ret = pl08x_prep_channel_resources(plchan, txd);
1514 if (ret)
1515 return NULL;
1516 /*
1517 * NB: the channel lock is held at this point so tx_submit()
1518 * must be called in direct succession.
1519 */
1520
1521 return &txd->tx;
1522}
1523
1524static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1525 unsigned long arg)
1526{
1527 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1528 struct pl08x_driver_data *pl08x = plchan->host;
1529 unsigned long flags;
1530 int ret = 0;
1531
1532 /* Controls applicable to inactive channels */
1533 if (cmd == DMA_SLAVE_CONFIG) {
1534 dma_set_runtime_config(chan,
1535 (struct dma_slave_config *)
1536 arg);
1537 return 0;
1538 }
1539
1540 /*
1541 * Anything succeeds on channels with no physical allocation and
1542 * no queued transfers.
1543 */
1544 spin_lock_irqsave(&plchan->lock, flags);
1545 if (!plchan->phychan && !plchan->at) {
1546 spin_unlock_irqrestore(&plchan->lock, flags);
1547 return 0;
1548 }
1549
1550 switch (cmd) {
1551 case DMA_TERMINATE_ALL:
1552 plchan->state = PL08X_CHAN_IDLE;
1553
1554 if (plchan->phychan) {
1555 pl08x_stop_phy_chan(plchan->phychan);
1556
1557 /*
1558 * Mark physical channel as free and free any slave
1559 * signal
1560 */
1561 if ((plchan->phychan->signal >= 0) &&
1562 pl08x->pd->put_signal) {
1563 pl08x->pd->put_signal(plchan);
1564 plchan->phychan->signal = -1;
1565 }
1566 pl08x_put_phy_channel(pl08x, plchan->phychan);
1567 plchan->phychan = NULL;
1568 }
1569 /* Stop any pending tasklet */
1570 tasklet_disable(&plchan->tasklet);
1571 /* Dequeue jobs and free LLIs */
1572 if (plchan->at) {
1573 pl08x_free_txd(pl08x, plchan->at);
1574 plchan->at = NULL;
1575 }
1576 /* Dequeue jobs not yet fired as well */
1577 pl08x_free_txd_list(pl08x, plchan);
1578 break;
1579 case DMA_PAUSE:
1580 pl08x_pause_phy_chan(plchan->phychan);
1581 plchan->state = PL08X_CHAN_PAUSED;
1582 break;
1583 case DMA_RESUME:
1584 pl08x_resume_phy_chan(plchan->phychan);
1585 plchan->state = PL08X_CHAN_RUNNING;
1586 break;
1587 default:
1588 /* Unknown command */
1589 ret = -ENXIO;
1590 break;
1591 }
1592
1593 spin_unlock_irqrestore(&plchan->lock, flags);
1594
1595 return ret;
1596}
1597
1598bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1599{
1600 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1601 char *name = chan_id;
1602
1603 /* Check that the channel is not taken! */
1604 if (!strcmp(plchan->name, name))
1605 return true;
1606
1607 return false;
1608}
1609
1610/*
1611 * Just check that the device is there and active
1612 * TODO: turn this bit on/off depending on the number of
1613 * physical channels actually used, if it is zero... well
1614 * shut it off. That will save some power. Cut the clock
1615 * at the same time.
1616 */
1617static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1618{
1619 u32 val;
1620
1621 val = readl(pl08x->base + PL080_CONFIG);
1622 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1623 * We implicitly clear bit 1 and that means little-endian mode
1624 val |= PL080_CONFIG_ENABLE;
1625 writel(val, pl08x->base + PL080_CONFIG);
1626}
1627
1628static void pl08x_tasklet(unsigned long data)
1629{
1630 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1631 struct pl08x_phy_chan *phychan = plchan->phychan;
1632 struct pl08x_driver_data *pl08x = plchan->host;
1633
1634 if (!plchan)
1635 BUG();
1636
1637 spin_lock(&plchan->lock);
1638
1639 if (plchan->at) {
1640 dma_async_tx_callback callback =
1641 plchan->at->tx.callback;
1642 void *callback_param =
1643 plchan->at->tx.callback_param;
1644
1645 /*
1646 * Update last completed
1647 */
1648 plchan->lc =
1649 (plchan->at->tx.cookie);
1650
1651 /*
1652 * Callback to signal completion
1653 */
1654 if (callback)
1655 callback(callback_param);
1656
1657 /*
1658 * Device callbacks should NOT clear
1659 * the current transaction on the channel
1660 * Linus: sometimes they should?
1661 */
1662 if (!plchan->at)
1663 BUG();
1664
1665 /*
1666 * Free the descriptor if it's not for a device
1667 * using a circular buffer
1668 */
1669 if (!plchan->at->cd->circular_buffer) {
1670 pl08x_free_txd(pl08x, plchan->at);
1671 plchan->at = NULL;
1672 }
1673 /*
1674 * else descriptor for circular
1675 * buffers only freed when
1676 * client has disabled dma
1677 */
1678 }
1679 /*
1680 * If a new descriptor is queued, set it up
1681 * plchan->at is NULL here
1682 */
1683 if (!list_empty(&plchan->desc_list)) {
1684 struct pl08x_txd *next;
1685
1686 next = list_first_entry(&plchan->desc_list,
1687 struct pl08x_txd,
1688 node);
1689 list_del(&next->node);
1690 plchan->at = next;
1691 /* Configure the physical channel for the next txd */
1692 pl08x_config_phychan_for_txd(plchan);
1693 pl08x_set_cregs(pl08x, plchan->phychan);
1694 pl08x_enable_phy_chan(pl08x, plchan->phychan);
1695 } else {
1696 struct pl08x_dma_chan *waiting = NULL;
1697
1698 /*
1699 * No more jobs, so free up the physical channel
1700 * Free any allocated signal on slave transfers too
1701 */
1702 if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
1703 pl08x->pd->put_signal(plchan);
1704 phychan->signal = -1;
1705 }
1706 pl08x_put_phy_channel(pl08x, phychan);
1707 plchan->phychan = NULL;
1708 plchan->state = PL08X_CHAN_IDLE;
1709
1710 /*
1711 * And NOW before anyone else can grab that free:d
1712 * up physical channel, see if there is some memcpy
1713 * pending that seriously needs to start because of
1714 * being stacked up while we were choking the
1715 * physical channels with data.
1716 */
1717 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1718 chan.device_node) {
1719 if (waiting->state == PL08X_CHAN_WAITING &&
1720 waiting->waiting != NULL) {
1721 int ret;
1722
1723 /* This should REALLY not fail now */
1724 ret = prep_phy_channel(waiting,
1725 waiting->waiting);
1726 BUG_ON(ret);
1727 waiting->state = PL08X_CHAN_RUNNING;
1728 waiting->waiting = NULL;
1729 pl08x_issue_pending(&waiting->chan);
1730 break;
1731 }
1732 }
1733 }
1734
1735 spin_unlock(&plchan->lock);
1736}
1737
1738static irqreturn_t pl08x_irq(int irq, void *dev)
1739{
1740 struct pl08x_driver_data *pl08x = dev;
1741 u32 mask = 0;
1742 u32 val;
1743 int i;
1744
1745 val = readl(pl08x->base + PL080_ERR_STATUS);
1746 if (val) {
1747 /*
1748 * An error interrupt (on one or more channels)
1749 */
1750 dev_err(&pl08x->adev->dev,
1751 "%s error interrupt, register value 0x%08x\n",
1752 __func__, val);
1753 /*
1754 * Simply clear ALL PL08X error interrupts,
1755 * regardless of channel and cause
1756 * FIXME: should be 0x00000003 on PL081 really.
1757 */
1758 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1759 }
1760 val = readl(pl08x->base + PL080_INT_STATUS);
1761 for (i = 0; i < pl08x->vd->channels; i++) {
1762 if ((1 << i) & val) {
1763 /* Locate physical channel */
1764 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1765 struct pl08x_dma_chan *plchan = phychan->serving;
1766
1767 /* Schedule tasklet on this channel */
1768 tasklet_schedule(&plchan->tasklet);
1769
1770 mask |= (1 << i);
1771 }
1772 }
1773 /*
1774 * Clear only the terminal interrupts on channels we processed
1775 */
1776 writel(mask, pl08x->base + PL080_TC_CLEAR);
1777
1778 return mask ? IRQ_HANDLED : IRQ_NONE;
1779}
1780
1781/*
1782 * Initialise the DMAC memcpy/slave channels.
1783 * Make a local wrapper to hold required data
1784 */
1785static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1786 struct dma_device *dmadev,
1787 unsigned int channels,
1788 bool slave)
1789{
1790 struct pl08x_dma_chan *chan;
1791 int i;
1792
1793 INIT_LIST_HEAD(&dmadev->channels);
1794 /*
1795 * Register as many memcpy channels as we have physical channels,
1796 * we won't always be able to use all but the code will have
1797 * to cope with that situation.
1798 */
1799 for (i = 0; i < channels; i++) {
1800 chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
1801 if (!chan) {
1802 dev_err(&pl08x->adev->dev,
1803 "%s no memory for channel\n", __func__);
1804 return -ENOMEM;
1805 }
1806
1807 chan->host = pl08x;
1808 chan->state = PL08X_CHAN_IDLE;
1809
1810 if (slave) {
1811 chan->slave = true;
1812 chan->name = pl08x->pd->slave_channels[i].bus_id;
1813 chan->cd = &pl08x->pd->slave_channels[i];
1814 } else {
1815 chan->cd = &pl08x->pd->memcpy_channel;
1816 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1817 if (!chan->name) {
1818 kfree(chan);
1819 return -ENOMEM;
1820 }
1821 }
1822 dev_info(&pl08x->adev->dev,
1823 "initialize virtual channel \"%s\"\n",
1824 chan->name);
1825
1826 chan->chan.device = dmadev;
1827 atomic_set(&chan->last_issued, 0);
1828 chan->lc = atomic_read(&chan->last_issued);
1829
1830 spin_lock_init(&chan->lock);
1831 INIT_LIST_HEAD(&chan->desc_list);
1832 tasklet_init(&chan->tasklet, pl08x_tasklet,
1833 (unsigned long) chan);
1834
1835 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1836 }
1837 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1838 i, slave ? "slave" : "memcpy");
1839 return i;
1840}
1841
1842static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1843{
1844 struct pl08x_dma_chan *chan = NULL;
1845 struct pl08x_dma_chan *next;
1846
1847 list_for_each_entry_safe(chan,
1848 next, &dmadev->channels, chan.device_node) {
1849 list_del(&chan->chan.device_node);
1850 kfree(chan);
1851 }
1852}
1853
1854#ifdef CONFIG_DEBUG_FS
1855static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1856{
1857 switch (state) {
1858 case PL08X_CHAN_IDLE:
1859 return "idle";
1860 case PL08X_CHAN_RUNNING:
1861 return "running";
1862 case PL08X_CHAN_PAUSED:
1863 return "paused";
1864 case PL08X_CHAN_WAITING:
1865 return "waiting";
1866 default:
1867 break;
1868 }
1869 return "UNKNOWN STATE";
1870}
1871
1872static int pl08x_debugfs_show(struct seq_file *s, void *data)
1873{
1874 struct pl08x_driver_data *pl08x = s->private;
1875 struct pl08x_dma_chan *chan;
1876 struct pl08x_phy_chan *ch;
1877 unsigned long flags;
1878 int i;
1879
1880 seq_printf(s, "PL08x physical channels:\n");
1881 seq_printf(s, "CHANNEL:\tUSER:\n");
1882 seq_printf(s, "--------\t-----\n");
1883 for (i = 0; i < pl08x->vd->channels; i++) {
1884 struct pl08x_dma_chan *virt_chan;
1885
1886 ch = &pl08x->phy_chans[i];
1887
1888 spin_lock_irqsave(&ch->lock, flags);
1889 virt_chan = ch->serving;
1890
1891 seq_printf(s, "%d\t\t%s\n",
1892 ch->id, virt_chan ? virt_chan->name : "(none)");
1893
1894 spin_unlock_irqrestore(&ch->lock, flags);
1895 }
1896
1897 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1898 seq_printf(s, "CHANNEL:\tSTATE:\n");
1899 seq_printf(s, "--------\t------\n");
1900 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1901 seq_printf(s, "%s\t\t%s\n", chan->name,
1902 pl08x_state_str(chan->state));
1903 }
1904
1905 seq_printf(s, "\nPL08x virtual slave channels:\n");
1906 seq_printf(s, "CHANNEL:\tSTATE:\n");
1907 seq_printf(s, "--------\t------\n");
1908 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1909 seq_printf(s, "%s\t\t%s\n", chan->name,
1910 pl08x_state_str(chan->state));
1911 }
1912
1913 return 0;
1914}
1915
1916static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1917{
1918 return single_open(file, pl08x_debugfs_show, inode->i_private);
1919}
1920
1921static const struct file_operations pl08x_debugfs_operations = {
1922 .open = pl08x_debugfs_open,
1923 .read = seq_read,
1924 .llseek = seq_lseek,
1925 .release = single_release,
1926};
1927
1928static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1929{
1930 /* Expose a simple debugfs interface to view the channel state */
1931 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
1932 NULL, pl08x,
1933 &pl08x_debugfs_operations);
1934}
1935
1936#else
1937static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1938{
1939}
1940#endif
1941
1942static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1943{
1944 struct pl08x_driver_data *pl08x;
1945 struct vendor_data *vd = id->data;
1946 int ret = 0;
1947 int i;
1948
1949 ret = amba_request_regions(adev, NULL);
1950 if (ret)
1951 return ret;
1952
1953 /* Create the driver state holder */
1954 pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
1955 if (!pl08x) {
1956 ret = -ENOMEM;
1957 goto out_no_pl08x;
1958 }
1959
1960 /* Initialize memcpy engine */
1961 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1962 pl08x->memcpy.dev = &adev->dev;
1963 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1964 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1965 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1966 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1967 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1968 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1969 pl08x->memcpy.device_control = pl08x_control;
1970
1971 /* Initialize slave engine */
1972 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1973 pl08x->slave.dev = &adev->dev;
1974 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1975 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1976 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1977 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1978 pl08x->slave.device_issue_pending = pl08x_issue_pending;
1979 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
1980 pl08x->slave.device_control = pl08x_control;
1981
1982 /* Get the platform data */
1983 pl08x->pd = dev_get_platdata(&adev->dev);
1984 if (!pl08x->pd) {
1985 dev_err(&adev->dev, "no platform data supplied\n");
1986 goto out_no_platdata;
1987 }
1988
1989 /* Assign useful pointers to the driver state */
1990 pl08x->adev = adev;
1991 pl08x->vd = vd;
1992
1993 /* A DMA memory pool for LLIs, align on 1-byte boundary */
1994 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1995 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
1996 if (!pl08x->pool) {
1997 ret = -ENOMEM;
1998 goto out_no_lli_pool;
1999 }
2000
2001 spin_lock_init(&pl08x->lock);
2002
2003 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2004 if (!pl08x->base) {
2005 ret = -ENOMEM;
2006 goto out_no_ioremap;
2007 }
2008
2009 /* Turn on the PL08x */
2010 pl08x_ensure_on(pl08x);
2011
2012 /*
2013 * Attach the interrupt handler
2014 */
2015 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2016 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2017
2018 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
2019 vd->name, pl08x);
2020 if (ret) {
2021 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2022 __func__, adev->irq[0]);
2023 goto out_no_irq;
2024 }
2025
2026 /* Initialize physical channels */
2027 pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
2028 GFP_KERNEL);
2029 if (!pl08x->phy_chans) {
2030 dev_err(&adev->dev, "%s failed to allocate "
2031 "physical channel holders\n",
2032 __func__);
2033 goto out_no_phychans;
2034 }
2035
2036 for (i = 0; i < vd->channels; i++) {
2037 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2038
2039 ch->id = i;
2040 ch->base = pl08x->base + PL080_Cx_BASE(i);
2041 spin_lock_init(&ch->lock);
2042 ch->serving = NULL;
2043 ch->signal = -1;
2044 dev_info(&adev->dev,
2045 "physical channel %d is %s\n", i,
2046 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2047 }
2048
2049 /* Register as many memcpy channels as there are physical channels */
2050 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2051 pl08x->vd->channels, false);
2052 if (ret <= 0) {
2053 dev_warn(&pl08x->adev->dev,
2054 "%s failed to enumerate memcpy channels - %d\n",
2055 __func__, ret);
2056 goto out_no_memcpy;
2057 }
2058 pl08x->memcpy.chancnt = ret;
2059
2060 /* Register slave channels */
2061 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2062 pl08x->pd->num_slave_channels,
2063 true);
2064 if (ret <= 0) {
2065 dev_warn(&pl08x->adev->dev,
2066 "%s failed to enumerate slave channels - %d\n",
2067 __func__, ret);
2068 goto out_no_slave;
2069 }
2070 pl08x->slave.chancnt = ret;
2071
2072 ret = dma_async_device_register(&pl08x->memcpy);
2073 if (ret) {
2074 dev_warn(&pl08x->adev->dev,
2075 "%s failed to register memcpy as an async device - %d\n",
2076 __func__, ret);
2077 goto out_no_memcpy_reg;
2078 }
2079
2080 ret = dma_async_device_register(&pl08x->slave);
2081 if (ret) {
2082 dev_warn(&pl08x->adev->dev,
2083 "%s failed to register slave as an async device - %d\n",
2084 __func__, ret);
2085 goto out_no_slave_reg;
2086 }
2087
2088 amba_set_drvdata(adev, pl08x);
2089 init_pl08x_debugfs(pl08x);
2090 dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
2091 vd->name, adev->res.start);
2092 return 0;
2093
2094out_no_slave_reg:
2095 dma_async_device_unregister(&pl08x->memcpy);
2096out_no_memcpy_reg:
2097 pl08x_free_virtual_channels(&pl08x->slave);
2098out_no_slave:
2099 pl08x_free_virtual_channels(&pl08x->memcpy);
2100out_no_memcpy:
2101 kfree(pl08x->phy_chans);
2102out_no_phychans:
2103 free_irq(adev->irq[0], pl08x);
2104out_no_irq:
2105 iounmap(pl08x->base);
2106out_no_ioremap:
2107 dma_pool_destroy(pl08x->pool);
2108out_no_lli_pool:
2109out_no_platdata:
2110 kfree(pl08x);
2111out_no_pl08x:
2112 amba_release_regions(adev);
2113 return ret;
2114}
2115
2116/* The PL080 has 8 channels and the PL081 has just 2 */
2117static struct vendor_data vendor_pl080 = {
2118 .name = "PL080",
2119 .channels = 8,
2120 .dualmaster = true,
2121};
2122
2123static struct vendor_data vendor_pl081 = {
2124 .name = "PL081",
2125 .channels = 2,
2126 .dualmaster = false,
2127};
2128
2129static struct amba_id pl08x_ids[] = {
2130 /* PL080 */
2131 {
2132 .id = 0x00041080,
2133 .mask = 0x000fffff,
2134 .data = &vendor_pl080,
2135 },
2136 /* PL081 */
2137 {
2138 .id = 0x00041081,
2139 .mask = 0x000fffff,
2140 .data = &vendor_pl081,
2141 },
2142 /* Nomadik 8815 PL080 variant */
2143 {
2144 .id = 0x00280880,
2145 .mask = 0x00ffffff,
2146 .data = &vendor_pl080,
2147 },
2148 { 0, 0 },
2149};
2150
2151static struct amba_driver pl08x_amba_driver = {
2152 .drv.name = DRIVER_NAME,
2153 .id_table = pl08x_ids,
2154 .probe = pl08x_probe,
2155};
2156
2157static int __init pl08x_init(void)
2158{
2159 int retval;
2160 retval = amba_driver_register(&pl08x_amba_driver);
2161 if (retval)
2162 printk(KERN_WARNING DRIVER_NAME
2163 "failed to register as an amba device (%d)\n",
2164 retval);
2165 return retval;
2166}
2167subsys_initcall(pl08x_init);
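
The pl08x_ids[] table above is matched by the AMBA bus core against the peripheral ID read from the device: each entry's .mask selects which ID bits are compared with .id, and the .data of the matching entry is what pl08x_probe() receives as its vendor_data. The following is only an illustrative sketch of that lookup, not code from this patch; pl08x_match_id() is an invented name for the example.

static const struct amba_id *pl08x_match_id(u32 periphid,
					    const struct amba_id *table)
{
	/* the { 0, 0 } entry terminates the table */
	for (; table->mask; table++)
		if ((periphid & table->mask) == table->id)
			return table;	/* ->data points at vendor_pl080/pl081 */
	return NULL;
}

The Nomadik 8815 variant reports a peripheral ID that does not match the generic PL080 entry under the 0x000fffff mask, which is why it needs its own table entry even though it maps to the same vendor_pl080 data.
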
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index db403b8ccabd..235153cd7ac5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -694,6 +694,8 @@ int dma_async_device_register(struct dma_device *device)
694 !device->device_prep_dma_sg); 694 !device->device_prep_dma_sg);
695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
696 !device->device_prep_slave_sg); 696 !device->device_prep_slave_sg);
697 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
698 !device->device_prep_dma_cyclic);
697 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 699 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
698 !device->device_control); 700 !device->device_control);
699 701
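
The new BUG_ON() makes device_prep_dma_cyclic() mandatory for any driver that sets DMA_CYCLIC in its capability mask, as the imx-dma and imx-sdma drivers added below do. For reference, a hypothetical client could drive that capability roughly as follows; this is only a sketch against the API used in this series, and the filter callback, buffer and period sizes are placeholders rather than anything defined by this patch.

#include <linux/dmaengine.h>

/* placeholder filter: a real one would also check the channel's owner */
static bool my_cyclic_filter(struct dma_chan *chan, void *param)
{
	chan->private = param;	/* e.g. driver-specific request data */
	return true;
}

static int my_start_cyclic(void *filter_param, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);

	chan = dma_request_channel(mask, my_cyclic_filter, filter_param);
	if (!chan)
		return -ENODEV;

	/* the BUG_ON above guarantees this hook exists for DMA_CYCLIC devices */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len, DMA_FROM_DEVICE);
	if (!desc) {
		dma_release_channel(chan);
		return -EBUSY;
	}

	desc->tx_submit(desc);		/* assigns the cookie and starts the hardware */
	return 0;
}

The submit step simply calls the descriptor's tx_submit() hook, which in the drivers below both assigns the cookie and enables the channel.
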
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
new file mode 100644
index 000000000000..346be6218058
--- /dev/null
+++ b/drivers/dma/imx-dma.c
@@ -0,0 +1,422 @@
1/*
2 * drivers/dma/imx-dma.c
3 *
4 * This file contains a driver for the Freescale i.MX DMA engine
5 * found on i.MX1/21/27
6 *
7 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
8 *
9 * The code contained herein is licensed under the GNU General Public
10 * License. You may obtain a copy of the GNU General Public License
11 * Version 2 or later at the following locations:
12 *
13 * http://www.opensource.org/licenses/gpl-license.html
14 * http://www.gnu.org/copyleft/gpl.html
15 */
16#include <linux/init.h>
17#include <linux/types.h>
18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <linux/spinlock.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/slab.h>
24#include <linux/platform_device.h>
25#include <linux/dmaengine.h>
26
27#include <asm/irq.h>
28#include <mach/dma-v1.h>
29#include <mach/hardware.h>
30
31struct imxdma_channel {
32 struct imxdma_engine *imxdma;
33 unsigned int channel;
34 unsigned int imxdma_channel;
35
36 enum dma_slave_buswidth word_size;
37 dma_addr_t per_address;
38 u32 watermark_level;
39 struct dma_chan chan;
40 spinlock_t lock;
41 struct dma_async_tx_descriptor desc;
42 dma_cookie_t last_completed;
43 enum dma_status status;
44 int dma_request;
45 struct scatterlist *sg_list;
46};
47
48#define MAX_DMA_CHANNELS 8
49
50struct imxdma_engine {
51 struct device *dev;
52 struct dma_device dma_device;
53 struct imxdma_channel channel[MAX_DMA_CHANNELS];
54};
55
56static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
57{
58 return container_of(chan, struct imxdma_channel, chan);
59}
60
61static void imxdma_handle(struct imxdma_channel *imxdmac)
62{
63 if (imxdmac->desc.callback)
64 imxdmac->desc.callback(imxdmac->desc.callback_param);
65 imxdmac->last_completed = imxdmac->desc.cookie;
66}
67
68static void imxdma_irq_handler(int channel, void *data)
69{
70 struct imxdma_channel *imxdmac = data;
71
72 imxdmac->status = DMA_SUCCESS;
73 imxdma_handle(imxdmac);
74}
75
76static void imxdma_err_handler(int channel, void *data, int error)
77{
78 struct imxdma_channel *imxdmac = data;
79
80 imxdmac->status = DMA_ERROR;
81 imxdma_handle(imxdmac);
82}
83
84static void imxdma_progression(int channel, void *data,
85 struct scatterlist *sg)
86{
87 struct imxdma_channel *imxdmac = data;
88
89 imxdmac->status = DMA_SUCCESS;
90 imxdma_handle(imxdmac);
91}
92
93static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
94 unsigned long arg)
95{
96 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
97 struct dma_slave_config *dmaengine_cfg = (void *)arg;
98 int ret;
99 unsigned int mode = 0;
100
101 switch (cmd) {
102 case DMA_TERMINATE_ALL:
103 imxdmac->status = DMA_ERROR;
104 imx_dma_disable(imxdmac->imxdma_channel);
105 return 0;
106 case DMA_SLAVE_CONFIG:
107 if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
108 imxdmac->per_address = dmaengine_cfg->src_addr;
109 imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
110 imxdmac->word_size = dmaengine_cfg->src_addr_width;
111 } else {
112 imxdmac->per_address = dmaengine_cfg->dst_addr;
113 imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
114 imxdmac->word_size = dmaengine_cfg->dst_addr_width;
115 }
116
117 switch (imxdmac->word_size) {
118 case DMA_SLAVE_BUSWIDTH_1_BYTE:
119 mode = IMX_DMA_MEMSIZE_8;
120 break;
121 case DMA_SLAVE_BUSWIDTH_2_BYTES:
122 mode = IMX_DMA_MEMSIZE_16;
123 break;
124 default:
125 case DMA_SLAVE_BUSWIDTH_4_BYTES:
126 mode = IMX_DMA_MEMSIZE_32;
127 break;
128 }
129 ret = imx_dma_config_channel(imxdmac->imxdma_channel,
130 mode | IMX_DMA_TYPE_FIFO,
131 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
132 imxdmac->dma_request, 1);
133
134 if (ret)
135 return ret;
136
137 imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
138
139 return 0;
140 default:
141 return -ENOSYS;
142 }
143
144 return -EINVAL;
145}
146
147static enum dma_status imxdma_tx_status(struct dma_chan *chan,
148 dma_cookie_t cookie,
149 struct dma_tx_state *txstate)
150{
151 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
152 dma_cookie_t last_used;
153 enum dma_status ret;
154
155 last_used = chan->cookie;
156
157 ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
158 dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
159
160 return ret;
161}
162
163static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
164{
165 dma_cookie_t cookie = imxdma->chan.cookie;
166
167 if (++cookie < 0)
168 cookie = 1;
169
170 imxdma->chan.cookie = cookie;
171 imxdma->desc.cookie = cookie;
172
173 return cookie;
174}
175
176static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
177{
178 struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
179 dma_cookie_t cookie;
180
181 spin_lock_irq(&imxdmac->lock);
182
183 cookie = imxdma_assign_cookie(imxdmac);
184
185 imx_dma_enable(imxdmac->imxdma_channel);
186
187 spin_unlock_irq(&imxdmac->lock);
188
189 return cookie;
190}
191
192static int imxdma_alloc_chan_resources(struct dma_chan *chan)
193{
194 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
195 struct imx_dma_data *data = chan->private;
196
197 imxdmac->dma_request = data->dma_request;
198
199 dma_async_tx_descriptor_init(&imxdmac->desc, chan);
200 imxdmac->desc.tx_submit = imxdma_tx_submit;
201 /* txd.flags will be overwritten in prep funcs */
202 imxdmac->desc.flags = DMA_CTRL_ACK;
203
204 imxdmac->status = DMA_SUCCESS;
205
206 return 0;
207}
208
209static void imxdma_free_chan_resources(struct dma_chan *chan)
210{
211 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
212
213 imx_dma_disable(imxdmac->imxdma_channel);
214
215 if (imxdmac->sg_list) {
216 kfree(imxdmac->sg_list);
217 imxdmac->sg_list = NULL;
218 }
219}
220
221static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
222 struct dma_chan *chan, struct scatterlist *sgl,
223 unsigned int sg_len, enum dma_data_direction direction,
224 unsigned long flags)
225{
226 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
227 struct scatterlist *sg;
228 int i, ret, dma_length = 0;
229 unsigned int dmamode;
230
231 if (imxdmac->status == DMA_IN_PROGRESS)
232 return NULL;
233
234 imxdmac->status = DMA_IN_PROGRESS;
235
236 for_each_sg(sgl, sg, sg_len, i) {
237 dma_length += sg->length;
238 }
239
240 if (direction == DMA_FROM_DEVICE)
241 dmamode = DMA_MODE_READ;
242 else
243 dmamode = DMA_MODE_WRITE;
244
245 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
246 dma_length, imxdmac->per_address, dmamode);
247 if (ret)
248 return NULL;
249
250 return &imxdmac->desc;
251}
252
253static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
254 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
255 size_t period_len, enum dma_data_direction direction)
256{
257 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
258 struct imxdma_engine *imxdma = imxdmac->imxdma;
259 int i, ret;
260 unsigned int periods = buf_len / period_len;
261 unsigned int dmamode;
262
263 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
264 __func__, imxdmac->channel, buf_len, period_len);
265
266 if (imxdmac->status == DMA_IN_PROGRESS)
267 return NULL;
268 imxdmac->status = DMA_IN_PROGRESS;
269
270 ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
271 imxdma_progression);
272 if (ret) {
273 dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
274 return NULL;
275 }
276
277 if (imxdmac->sg_list)
278 kfree(imxdmac->sg_list);
279
280 imxdmac->sg_list = kcalloc(periods + 1,
281 sizeof(struct scatterlist), GFP_KERNEL);
282 if (!imxdmac->sg_list)
283 return NULL;
284
285 sg_init_table(imxdmac->sg_list, periods);
286
287 for (i = 0; i < periods; i++) {
288 imxdmac->sg_list[i].page_link = 0;
289 imxdmac->sg_list[i].offset = 0;
290 imxdmac->sg_list[i].dma_address = dma_addr;
291 imxdmac->sg_list[i].length = period_len;
292 dma_addr += period_len;
293 }
294
295 /* close the loop */
296 imxdmac->sg_list[periods].offset = 0;
297 imxdmac->sg_list[periods].length = 0;
298 imxdmac->sg_list[periods].page_link =
299 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
300
301 if (direction == DMA_FROM_DEVICE)
302 dmamode = DMA_MODE_READ;
303 else
304 dmamode = DMA_MODE_WRITE;
305
306 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
307 IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
308 if (ret)
309 return NULL;
310
311 return &imxdmac->desc;
312}
313
314static void imxdma_issue_pending(struct dma_chan *chan)
315{
316 /*
317 * Nothing to do. We only have a single descriptor
318 */
319}
320
321static int __init imxdma_probe(struct platform_device *pdev)
322{
323 struct imxdma_engine *imxdma;
324 int ret, i;
325
326 imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
327 if (!imxdma)
328 return -ENOMEM;
329
330 INIT_LIST_HEAD(&imxdma->dma_device.channels);
331
332 /* Initialize channel parameters */
333 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
334 struct imxdma_channel *imxdmac = &imxdma->channel[i];
335
336 imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
337 DMA_PRIO_MEDIUM);
338 if ((int)imxdmac->imxdma_channel < 0)
339 goto err_init;
340
341 imx_dma_setup_handlers(imxdmac->imxdma_channel,
342 imxdma_irq_handler, imxdma_err_handler, imxdmac);
343
344 imxdmac->imxdma = imxdma;
345 spin_lock_init(&imxdmac->lock);
346
347 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
348 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
349
350 imxdmac->chan.device = &imxdma->dma_device;
351 imxdmac->chan.chan_id = i;
352 imxdmac->channel = i;
353
354 /* Add the channel to the DMAC list */
355 list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
356 }
357
358 imxdma->dev = &pdev->dev;
359 imxdma->dma_device.dev = &pdev->dev;
360
361 imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
362 imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
363 imxdma->dma_device.device_tx_status = imxdma_tx_status;
364 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
365 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
366 imxdma->dma_device.device_control = imxdma_control;
367 imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
368
369 platform_set_drvdata(pdev, imxdma);
370
371 ret = dma_async_device_register(&imxdma->dma_device);
372 if (ret) {
373 dev_err(&pdev->dev, "unable to register\n");
374 goto err_init;
375 }
376
377 return 0;
378
379err_init:
380 while (--i >= 0) {
381 struct imxdma_channel *imxdmac = &imxdma->channel[i];
382 imx_dma_free(imxdmac->imxdma_channel);
383 }
384
385 kfree(imxdma);
386 return ret;
387}
388
389static int __exit imxdma_remove(struct platform_device *pdev)
390{
391 struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
392 int i;
393
394 dma_async_device_unregister(&imxdma->dma_device);
395
396 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
397 struct imxdma_channel *imxdmac = &imxdma->channel[i];
398
399 imx_dma_free(imxdmac->imxdma_channel);
400 }
401
402 kfree(imxdma);
403
404 return 0;
405}
406
407static struct platform_driver imxdma_driver = {
408 .driver = {
409 .name = "imx-dma",
410 },
411 .remove = __exit_p(imxdma_remove),
412};
413
414static int __init imxdma_module_init(void)
415{
416 return platform_driver_probe(&imxdma_driver, imxdma_probe);
417}
418subsys_initcall(imxdma_module_init);
419
420MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
421MODULE_DESCRIPTION("i.MX dma driver");
422MODULE_LICENSE("GPL");
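
The slave channels registered by imxdma_probe() are driven entirely through the generic dmaengine calls wired up above: DMA_SLAVE_CONFIG (handled by imxdma_control()) sets the peripheral address, bus width and watermark, and device_prep_slave_sg() maps a scatterlist onto the hardware channel. A hedged client-side sketch follows; the bus width, burst length and the fifo_addr parameter are chosen arbitrarily for illustration and are not part of this patch.

#include <linux/dmaengine.h>

static int my_imx_slave_rx(struct dma_chan *chan, struct scatterlist *sgl,
			   unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 4,	/* becomes the watermark level above */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* routed to imxdma_control(..., DMA_SLAVE_CONFIG, ...) */
	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					   (unsigned long)&cfg);
	if (ret)
		return ret;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE, DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->tx_submit(desc);	/* imxdma_tx_submit() enables the channel */
	return 0;
}
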
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
new file mode 100644
index 000000000000..0834323a0599
--- /dev/null
+++ b/drivers/dma/imx-sdma.c
@@ -0,0 +1,1392 @@
1/*
2 * drivers/dma/imx-sdma.c
3 *
4 * This file contains a driver for the Freescale Smart DMA engine
5 *
6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
7 *
8 * Based on code from Freescale:
9 *
10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
11 *
12 * The code contained herein is licensed under the GNU General Public
13 * License. You may obtain a copy of the GNU General Public License
14 * Version 2 or later at the following locations:
15 *
16 * http://www.opensource.org/licenses/gpl-license.html
17 * http://www.gnu.org/copyleft/gpl.html
18 */
19
20#include <linux/init.h>
21#include <linux/types.h>
22#include <linux/mm.h>
23#include <linux/interrupt.h>
24#include <linux/clk.h>
25#include <linux/wait.h>
26#include <linux/sched.h>
27#include <linux/semaphore.h>
28#include <linux/spinlock.h>
29#include <linux/device.h>
30#include <linux/dma-mapping.h>
31#include <linux/firmware.h>
32#include <linux/slab.h>
33#include <linux/platform_device.h>
34#include <linux/dmaengine.h>
35
36#include <asm/irq.h>
37#include <mach/sdma.h>
38#include <mach/dma.h>
39#include <mach/hardware.h>
40
41/* SDMA registers */
42#define SDMA_H_C0PTR 0x000
43#define SDMA_H_INTR 0x004
44#define SDMA_H_STATSTOP 0x008
45#define SDMA_H_START 0x00c
46#define SDMA_H_EVTOVR 0x010
47#define SDMA_H_DSPOVR 0x014
48#define SDMA_H_HOSTOVR 0x018
49#define SDMA_H_EVTPEND 0x01c
50#define SDMA_H_DSPENBL 0x020
51#define SDMA_H_RESET 0x024
52#define SDMA_H_EVTERR 0x028
53#define SDMA_H_INTRMSK 0x02c
54#define SDMA_H_PSW 0x030
55#define SDMA_H_EVTERRDBG 0x034
56#define SDMA_H_CONFIG 0x038
57#define SDMA_ONCE_ENB 0x040
58#define SDMA_ONCE_DATA 0x044
59#define SDMA_ONCE_INSTR 0x048
60#define SDMA_ONCE_STAT 0x04c
61#define SDMA_ONCE_CMD 0x050
62#define SDMA_EVT_MIRROR 0x054
63#define SDMA_ILLINSTADDR 0x058
64#define SDMA_CHN0ADDR 0x05c
65#define SDMA_ONCE_RTB 0x060
66#define SDMA_XTRIG_CONF1 0x070
67#define SDMA_XTRIG_CONF2 0x074
68#define SDMA_CHNENBL0_V2 0x200
69#define SDMA_CHNENBL0_V1 0x080
70#define SDMA_CHNPRI_0 0x100
71
72/*
73 * Buffer descriptor status values.
74 */
75#define BD_DONE 0x01
76#define BD_WRAP 0x02
77#define BD_CONT 0x04
78#define BD_INTR 0x08
79#define BD_RROR 0x10
80#define BD_LAST 0x20
81#define BD_EXTD 0x80
82
83/*
84 * Data Node descriptor status values.
85 */
86#define DND_END_OF_FRAME 0x80
87#define DND_END_OF_XFER 0x40
88#define DND_DONE 0x20
89#define DND_UNUSED 0x01
90
91/*
92 * IPCV2 descriptor status values.
93 */
94#define BD_IPCV2_END_OF_FRAME 0x40
95
96#define IPCV2_MAX_NODES 50
97/*
98 * Error bit set in the CCB status field by the SDMA,
99 * in setbd routine, in case of a transfer error
100 */
101#define DATA_ERROR 0x10000000
102
103/*
104 * Buffer descriptor commands.
105 */
106#define C0_ADDR 0x01
107#define C0_LOAD 0x02
108#define C0_DUMP 0x03
109#define C0_SETCTX 0x07
110#define C0_GETCTX 0x03
111#define C0_SETDM 0x01
112#define C0_SETPM 0x04
113#define C0_GETDM 0x02
114#define C0_GETPM 0x08
115/*
116 * Change endianness indicator in the BD command field
117 */
118#define CHANGE_ENDIANNESS 0x80
119
120/*
121 * Mode/Count of data node descriptors - IPCv2
122 */
123struct sdma_mode_count {
124 u32 count : 16; /* size of the buffer pointed by this BD */
125 u32 status : 8; /* E,R,I,C,W,D status bits stored here */
126 u32 command : 8; /* command mostly used for channel 0 */
127};
128
129/*
130 * Buffer descriptor
131 */
132struct sdma_buffer_descriptor {
133 struct sdma_mode_count mode;
134 u32 buffer_addr; /* address of the buffer described */
135 u32 ext_buffer_addr; /* extended buffer address */
136} __attribute__ ((packed));
137
138/**
139 * struct sdma_channel_control - Channel control Block
140 *
141 * @current_bd_ptr: current buffer descriptor processed
142 * @base_bd_ptr: first element of buffer descriptor array
143 * @unused: padding. The SDMA engine expects an array of 128 byte
144 * control blocks
145 */
146struct sdma_channel_control {
147 u32 current_bd_ptr;
148 u32 base_bd_ptr;
149 u32 unused[2];
150} __attribute__ ((packed));
151
152/**
153 * struct sdma_state_registers - SDMA context for a channel
154 *
155 * @pc: program counter
156 * @t: test bit: status of arithmetic & test instruction
157 * @rpc: return program counter
158 * @sf: source fault while loading data
159 * @spc: loop start program counter
160 * @df: destination fault while storing data
161 * @epc: loop end program counter
162 * @lm: loop mode
163 */
164struct sdma_state_registers {
165 u32 pc :14;
166 u32 unused1: 1;
167 u32 t : 1;
168 u32 rpc :14;
169 u32 unused0: 1;
170 u32 sf : 1;
171 u32 spc :14;
172 u32 unused2: 1;
173 u32 df : 1;
174 u32 epc :14;
175 u32 lm : 2;
176} __attribute__ ((packed));
177
178/**
179 * struct sdma_context_data - sdma context specific to a channel
180 *
181 * @channel_state: channel state bits
182 * @gReg: general registers
183 * @mda: burst dma destination address register
184 * @msa: burst dma source address register
185 * @ms: burst dma status register
186 * @md: burst dma data register
187 * @pda: peripheral dma destination address register
188 * @psa: peripheral dma source address register
189 * @ps: peripheral dma status register
190 * @pd: peripheral dma data register
191 * @ca: CRC polynomial register
192 * @cs: CRC accumulator register
193 * @dda: dedicated core destination address register
194 * @dsa: dedicated core source address register
195 * @ds: dedicated core status register
196 * @dd: dedicated core data register
197 */
198struct sdma_context_data {
199 struct sdma_state_registers channel_state;
200 u32 gReg[8];
201 u32 mda;
202 u32 msa;
203 u32 ms;
204 u32 md;
205 u32 pda;
206 u32 psa;
207 u32 ps;
208 u32 pd;
209 u32 ca;
210 u32 cs;
211 u32 dda;
212 u32 dsa;
213 u32 ds;
214 u32 dd;
215 u32 scratch0;
216 u32 scratch1;
217 u32 scratch2;
218 u32 scratch3;
219 u32 scratch4;
220 u32 scratch5;
221 u32 scratch6;
222 u32 scratch7;
223} __attribute__ ((packed));
224
225#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
226
227struct sdma_engine;
228
229/**
230 * struct sdma_channel - housekeeping for a SDMA channel
231 *
232 * @sdma: pointer to the SDMA engine for this channel
233 * @channel: the channel number, matches dmaengine chan_id
234 * @direction: transfer direction. Needed for selecting the SDMA script
235 * @peripheral_type: peripheral type. Needed for selecting the SDMA script
236 * @event_id0: aka dma request line
237 * @event_id1: for channels that use 2 events
238 * @word_size: peripheral access size
239 * @buf_tail: ID of the buffer that was processed
240 * @done: channel completion
241 * @num_bd: number of descriptors currently being handled, max NUM_BD
242 */
243struct sdma_channel {
244 struct sdma_engine *sdma;
245 unsigned int channel;
246 enum dma_data_direction direction;
247 enum sdma_peripheral_type peripheral_type;
248 unsigned int event_id0;
249 unsigned int event_id1;
250 enum dma_slave_buswidth word_size;
251 unsigned int buf_tail;
252 struct completion done;
253 unsigned int num_bd;
254 struct sdma_buffer_descriptor *bd;
255 dma_addr_t bd_phys;
256 unsigned int pc_from_device, pc_to_device;
257 unsigned long flags;
258 dma_addr_t per_address;
259 u32 event_mask0, event_mask1;
260 u32 watermark_level;
261 u32 shp_addr, per_addr;
262 struct dma_chan chan;
263 spinlock_t lock;
264 struct dma_async_tx_descriptor desc;
265 dma_cookie_t last_completed;
266 enum dma_status status;
267};
268
269#define IMX_DMA_SG_LOOP (1 << 0)
270
271#define MAX_DMA_CHANNELS 32
272#define MXC_SDMA_DEFAULT_PRIORITY 1
273#define MXC_SDMA_MIN_PRIORITY 1
274#define MXC_SDMA_MAX_PRIORITY 7
275
276/**
277 * struct sdma_script_start_addrs - SDMA script start pointers
278 *
279 * start addresses of the different functions in the physical
280 * address space of the SDMA engine.
281 */
282struct sdma_script_start_addrs {
283 u32 ap_2_ap_addr;
284 u32 ap_2_bp_addr;
285 u32 ap_2_ap_fixed_addr;
286 u32 bp_2_ap_addr;
287 u32 loopback_on_dsp_side_addr;
288 u32 mcu_interrupt_only_addr;
289 u32 firi_2_per_addr;
290 u32 firi_2_mcu_addr;
291 u32 per_2_firi_addr;
292 u32 mcu_2_firi_addr;
293 u32 uart_2_per_addr;
294 u32 uart_2_mcu_addr;
295 u32 per_2_app_addr;
296 u32 mcu_2_app_addr;
297 u32 per_2_per_addr;
298 u32 uartsh_2_per_addr;
299 u32 uartsh_2_mcu_addr;
300 u32 per_2_shp_addr;
301 u32 mcu_2_shp_addr;
302 u32 ata_2_mcu_addr;
303 u32 mcu_2_ata_addr;
304 u32 app_2_per_addr;
305 u32 app_2_mcu_addr;
306 u32 shp_2_per_addr;
307 u32 shp_2_mcu_addr;
308 u32 mshc_2_mcu_addr;
309 u32 mcu_2_mshc_addr;
310 u32 spdif_2_mcu_addr;
311 u32 mcu_2_spdif_addr;
312 u32 asrc_2_mcu_addr;
313 u32 ext_mem_2_ipu_addr;
314 u32 descrambler_addr;
315 u32 dptc_dvfs_addr;
316 u32 utra_addr;
317 u32 ram_code_start_addr;
318};
319
320#define SDMA_FIRMWARE_MAGIC 0x414d4453
321
322/**
323 * struct sdma_firmware_header - Layout of the firmware image
324 *
325 * @magic: "SDMA"
326 * @version_major: increased whenever layout of struct sdma_script_start_addrs
327 * changes.
328 * @version_minor: firmware minor version (for binary compatible changes)
329 * @script_addrs_start: offset of struct sdma_script_start_addrs in this image
330 * @num_script_addrs: Number of script addresses in this image
331 * @ram_code_start: offset of SDMA ram image in this firmware image
332 * @ram_code_size: size of SDMA ram image
333 * @script_addrs: Stores the start address of the SDMA scripts
334 * (in SDMA memory space)
335 */
336struct sdma_firmware_header {
337 u32 magic;
338 u32 version_major;
339 u32 version_minor;
340 u32 script_addrs_start;
341 u32 num_script_addrs;
342 u32 ram_code_start;
343 u32 ram_code_size;
344};
345
346struct sdma_engine {
347 struct device *dev;
348 struct sdma_channel channel[MAX_DMA_CHANNELS];
349 struct sdma_channel_control *channel_control;
350 void __iomem *regs;
351 unsigned int version;
352 unsigned int num_events;
353 struct sdma_context_data *context;
354 dma_addr_t context_phys;
355 struct dma_device dma_device;
356 struct clk *clk;
357 struct sdma_script_start_addrs *script_addrs;
358};
359
360#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
361#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
362#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
363#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
364
365static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
366{
367 u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
368
369 return chnenbl0 + event * 4;
370}
371
372static int sdma_config_ownership(struct sdma_channel *sdmac,
373 bool event_override, bool mcu_override, bool dsp_override)
374{
375 struct sdma_engine *sdma = sdmac->sdma;
376 int channel = sdmac->channel;
377 u32 evt, mcu, dsp;
378
379 if (event_override && mcu_override && dsp_override)
380 return -EINVAL;
381
382 evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
383 mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
384 dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
385
386 if (dsp_override)
387 dsp &= ~(1 << channel);
388 else
389 dsp |= (1 << channel);
390
391 if (event_override)
392 evt &= ~(1 << channel);
393 else
394 evt |= (1 << channel);
395
396 if (mcu_override)
397 mcu &= ~(1 << channel);
398 else
399 mcu |= (1 << channel);
400
401 __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
402 __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
403 __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
404
405 return 0;
406}
407
408/*
409 * sdma_run_channel - run a channel and wait till it's done
410 */
411static int sdma_run_channel(struct sdma_channel *sdmac)
412{
413 struct sdma_engine *sdma = sdmac->sdma;
414 int channel = sdmac->channel;
415 int ret;
416
417 init_completion(&sdmac->done);
418
419 __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
420
421 ret = wait_for_completion_timeout(&sdmac->done, HZ);
422
423 return ret ? 0 : -ETIMEDOUT;
424}
425
426static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
427 u32 address)
428{
429 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
430 void *buf_virt;
431 dma_addr_t buf_phys;
432 int ret;
433
434 buf_virt = dma_alloc_coherent(NULL,
435 size,
436 &buf_phys, GFP_KERNEL);
437 if (!buf_virt)
438 return -ENOMEM;
439
440 bd0->mode.command = C0_SETPM;
441 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
442 bd0->mode.count = size / 2;
443 bd0->buffer_addr = buf_phys;
444 bd0->ext_buffer_addr = address;
445
446 memcpy(buf_virt, buf, size);
447
448 ret = sdma_run_channel(&sdma->channel[0]);
449
450 dma_free_coherent(NULL, size, buf_virt, buf_phys);
451
452 return ret;
453}
454
455static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
456{
457 struct sdma_engine *sdma = sdmac->sdma;
458 int channel = sdmac->channel;
459 u32 val;
460 u32 chnenbl = chnenbl_ofs(sdma, event);
461
462 val = __raw_readl(sdma->regs + chnenbl);
463 val |= (1 << channel);
464 __raw_writel(val, sdma->regs + chnenbl);
465}
466
467static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
468{
469 struct sdma_engine *sdma = sdmac->sdma;
470 int channel = sdmac->channel;
471 u32 chnenbl = chnenbl_ofs(sdma, event);
472 u32 val;
473
474 val = __raw_readl(sdma->regs + chnenbl);
475 val &= ~(1 << channel);
476 __raw_writel(val, sdma->regs + chnenbl);
477}
478
479static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
480{
481 struct sdma_buffer_descriptor *bd;
482
483 /*
484 * Loop mode: iterate over the descriptors, re-arm them and
485 * call the callback function.
486 */
487 while (1) {
488 bd = &sdmac->bd[sdmac->buf_tail];
489
490 if (bd->mode.status & BD_DONE)
491 break;
492
493 if (bd->mode.status & BD_RROR)
494 sdmac->status = DMA_ERROR;
495 else
496 sdmac->status = DMA_SUCCESS;
497
498 bd->mode.status |= BD_DONE;
499 sdmac->buf_tail++;
500 sdmac->buf_tail %= sdmac->num_bd;
501
502 if (sdmac->desc.callback)
503 sdmac->desc.callback(sdmac->desc.callback_param);
504 }
505}
506
507static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
508{
509 struct sdma_buffer_descriptor *bd;
510 int i, error = 0;
511
512 /*
513 * Non-loop mode: iterate over all descriptors, collect
514 * errors and call the callback function
515 */
516 for (i = 0; i < sdmac->num_bd; i++) {
517 bd = &sdmac->bd[i];
518
519 if (bd->mode.status & (BD_DONE | BD_RROR))
520 error = -EIO;
521 }
522
523 if (error)
524 sdmac->status = DMA_ERROR;
525 else
526 sdmac->status = DMA_SUCCESS;
527
528 if (sdmac->desc.callback)
529 sdmac->desc.callback(sdmac->desc.callback_param);
530 sdmac->last_completed = sdmac->desc.cookie;
531}
532
533static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
534{
535 complete(&sdmac->done);
536
537 /* not interested in channel 0 interrupts */
538 if (sdmac->channel == 0)
539 return;
540
541 if (sdmac->flags & IMX_DMA_SG_LOOP)
542 sdma_handle_channel_loop(sdmac);
543 else
544 mxc_sdma_handle_channel_normal(sdmac);
545}
546
547static irqreturn_t sdma_int_handler(int irq, void *dev_id)
548{
549 struct sdma_engine *sdma = dev_id;
550 u32 stat;
551
552 stat = __raw_readl(sdma->regs + SDMA_H_INTR);
553 __raw_writel(stat, sdma->regs + SDMA_H_INTR);
554
555 while (stat) {
556 int channel = fls(stat) - 1;
557 struct sdma_channel *sdmac = &sdma->channel[channel];
558
559 mxc_sdma_handle_channel(sdmac);
560
561 stat &= ~(1 << channel);
562 }
563
564 return IRQ_HANDLED;
565}
566
567/*
568 * sets the pc of SDMA script according to the peripheral type
569 */
570static void sdma_get_pc(struct sdma_channel *sdmac,
571 enum sdma_peripheral_type peripheral_type)
572{
573 struct sdma_engine *sdma = sdmac->sdma;
574 int per_2_emi = 0, emi_2_per = 0;
575 /*
576 * These are needed once we start to support transfers between
577 * two peripherals or memory-to-memory transfers
578 */
579 int per_2_per = 0, emi_2_emi = 0;
580
581 sdmac->pc_from_device = 0;
582 sdmac->pc_to_device = 0;
583
584 switch (peripheral_type) {
585 case IMX_DMATYPE_MEMORY:
586 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
587 break;
588 case IMX_DMATYPE_DSP:
589 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
590 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
591 break;
592 case IMX_DMATYPE_FIRI:
593 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
594 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
595 break;
596 case IMX_DMATYPE_UART:
597 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
598 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
599 break;
600 case IMX_DMATYPE_UART_SP:
601 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
602 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
603 break;
604 case IMX_DMATYPE_ATA:
605 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
606 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
607 break;
608 case IMX_DMATYPE_CSPI:
609 case IMX_DMATYPE_EXT:
610 case IMX_DMATYPE_SSI:
611 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
612 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
613 break;
614 case IMX_DMATYPE_SSI_SP:
615 case IMX_DMATYPE_MMC:
616 case IMX_DMATYPE_SDHC:
617 case IMX_DMATYPE_CSPI_SP:
618 case IMX_DMATYPE_ESAI:
619 case IMX_DMATYPE_MSHC_SP:
620 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
621 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
622 break;
623 case IMX_DMATYPE_ASRC:
624 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
625 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
626 per_2_per = sdma->script_addrs->per_2_per_addr;
627 break;
628 case IMX_DMATYPE_MSHC:
629 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
630 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
631 break;
632 case IMX_DMATYPE_CCM:
633 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
634 break;
635 case IMX_DMATYPE_SPDIF:
636 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
637 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
638 break;
639 case IMX_DMATYPE_IPU_MEMORY:
640 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
641 break;
642 default:
643 break;
644 }
645
646 sdmac->pc_from_device = per_2_emi;
647 sdmac->pc_to_device = emi_2_per;
648}
649
650static int sdma_load_context(struct sdma_channel *sdmac)
651{
652 struct sdma_engine *sdma = sdmac->sdma;
653 int channel = sdmac->channel;
654 int load_address;
655 struct sdma_context_data *context = sdma->context;
656 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
657 int ret;
658
659 if (sdmac->direction == DMA_FROM_DEVICE) {
660 load_address = sdmac->pc_from_device;
661 } else {
662 load_address = sdmac->pc_to_device;
663 }
664
665 if (load_address < 0)
666 return load_address;
667
668 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
669 dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
670 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
671 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
672 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
673 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
674
675 memset(context, 0, sizeof(*context));
676 context->channel_state.pc = load_address;
677
678 /* Pass the event masks, the peripheral base address and the
679 * watermark level to the script via the context
680 */
681 context->gReg[0] = sdmac->event_mask1;
682 context->gReg[1] = sdmac->event_mask0;
683 context->gReg[2] = sdmac->per_addr;
684 context->gReg[6] = sdmac->shp_addr;
685 context->gReg[7] = sdmac->watermark_level;
686
687 bd0->mode.command = C0_SETDM;
688 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
689 bd0->mode.count = sizeof(*context) / 4;
690 bd0->buffer_addr = sdma->context_phys;
691 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
692
693 ret = sdma_run_channel(&sdma->channel[0]);
694
695 return ret;
696}
697
698static void sdma_disable_channel(struct sdma_channel *sdmac)
699{
700 struct sdma_engine *sdma = sdmac->sdma;
701 int channel = sdmac->channel;
702
703 __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
704 sdmac->status = DMA_ERROR;
705}
706
707static int sdma_config_channel(struct sdma_channel *sdmac)
708{
709 int ret;
710
711 sdma_disable_channel(sdmac);
712
713 sdmac->event_mask0 = 0;
714 sdmac->event_mask1 = 0;
715 sdmac->shp_addr = 0;
716 sdmac->per_addr = 0;
717
718 if (sdmac->event_id0) {
719 if (sdmac->event_id0 > 32)
720 return -EINVAL;
721 sdma_event_enable(sdmac, sdmac->event_id0);
722 }
723
724 switch (sdmac->peripheral_type) {
725 case IMX_DMATYPE_DSP:
726 sdma_config_ownership(sdmac, false, true, true);
727 break;
728 case IMX_DMATYPE_MEMORY:
729 sdma_config_ownership(sdmac, false, true, false);
730 break;
731 default:
732 sdma_config_ownership(sdmac, true, true, false);
733 break;
734 }
735
736 sdma_get_pc(sdmac, sdmac->peripheral_type);
737
738 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
739 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
740 /* Handle multiple event channels differently */
741 if (sdmac->event_id1) {
742 sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
743 if (sdmac->event_id1 > 31)
744 sdmac->watermark_level |= 1 << 31;
745 sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
746 if (sdmac->event_id0 > 31)
747 sdmac->watermark_level |= 1 << 30;
748 } else {
749 sdmac->event_mask0 = 1 << sdmac->event_id0;
750 sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
751 }
752 /* Watermark Level */
753 sdmac->watermark_level |= sdmac->watermark_level;
754 /* Address */
755 sdmac->shp_addr = sdmac->per_address;
756 } else {
757 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
758 }
759
760 ret = sdma_load_context(sdmac);
761
762 return ret;
763}
764
765static int sdma_set_channel_priority(struct sdma_channel *sdmac,
766 unsigned int priority)
767{
768 struct sdma_engine *sdma = sdmac->sdma;
769 int channel = sdmac->channel;
770
771 if (priority < MXC_SDMA_MIN_PRIORITY
772 || priority > MXC_SDMA_MAX_PRIORITY) {
773 return -EINVAL;
774 }
775
776 __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
777
778 return 0;
779}
780
781static int sdma_request_channel(struct sdma_channel *sdmac)
782{
783 struct sdma_engine *sdma = sdmac->sdma;
784 int channel = sdmac->channel;
785 int ret = -EBUSY;
786
787 sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
788 if (!sdmac->bd) {
789 ret = -ENOMEM;
790 goto out;
791 }
792
793 memset(sdmac->bd, 0, PAGE_SIZE);
794
795 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
796 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
797
798 clk_enable(sdma->clk);
799
800 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
801
802 init_completion(&sdmac->done);
803
804 sdmac->buf_tail = 0;
805
806 return 0;
807out:
808
809 return ret;
810}
811
812static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
813{
814 __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
815}
816
817static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
818{
819 dma_cookie_t cookie = sdma->chan.cookie;
820
821 if (++cookie < 0)
822 cookie = 1;
823
824 sdma->chan.cookie = cookie;
825 sdma->desc.cookie = cookie;
826
827 return cookie;
828}
829
830static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
831{
832 return container_of(chan, struct sdma_channel, chan);
833}
834
835static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
836{
837 struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
838 struct sdma_engine *sdma = sdmac->sdma;
839 dma_cookie_t cookie;
840
841 spin_lock_irq(&sdmac->lock);
842
843 cookie = sdma_assign_cookie(sdmac);
844
845 sdma_enable_channel(sdma, tx->chan->chan_id);
846
847 spin_unlock_irq(&sdmac->lock);
848
849 return cookie;
850}
851
852static int sdma_alloc_chan_resources(struct dma_chan *chan)
853{
854 struct sdma_channel *sdmac = to_sdma_chan(chan);
855 struct imx_dma_data *data = chan->private;
856 int prio, ret;
857
858 /* No need to execute this for internal channel 0 */
859 if (chan->chan_id == 0)
860 return 0;
861
862 if (!data)
863 return -EINVAL;
864
865 switch (data->priority) {
866 case DMA_PRIO_HIGH:
867 prio = 3;
868 break;
869 case DMA_PRIO_MEDIUM:
870 prio = 2;
871 break;
872 case DMA_PRIO_LOW:
873 default:
874 prio = 1;
875 break;
876 }
877
878 sdmac->peripheral_type = data->peripheral_type;
879 sdmac->event_id0 = data->dma_request;
880 ret = sdma_set_channel_priority(sdmac, prio);
881 if (ret)
882 return ret;
883
884 ret = sdma_request_channel(sdmac);
885 if (ret)
886 return ret;
887
888 dma_async_tx_descriptor_init(&sdmac->desc, chan);
889 sdmac->desc.tx_submit = sdma_tx_submit;
890 /* txd.flags will be overwritten in prep funcs */
891 sdmac->desc.flags = DMA_CTRL_ACK;
892
893 return 0;
894}
895
896static void sdma_free_chan_resources(struct dma_chan *chan)
897{
898 struct sdma_channel *sdmac = to_sdma_chan(chan);
899 struct sdma_engine *sdma = sdmac->sdma;
900
901 sdma_disable_channel(sdmac);
902
903 if (sdmac->event_id0)
904 sdma_event_disable(sdmac, sdmac->event_id0);
905 if (sdmac->event_id1)
906 sdma_event_disable(sdmac, sdmac->event_id1);
907
908 sdmac->event_id0 = 0;
909 sdmac->event_id1 = 0;
910
911 sdma_set_channel_priority(sdmac, 0);
912
913 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
914
915 clk_disable(sdma->clk);
916}
917
918static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
919 struct dma_chan *chan, struct scatterlist *sgl,
920 unsigned int sg_len, enum dma_data_direction direction,
921 unsigned long flags)
922{
923 struct sdma_channel *sdmac = to_sdma_chan(chan);
924 struct sdma_engine *sdma = sdmac->sdma;
925 int ret, i, count;
926 int channel = chan->chan_id;
927 struct scatterlist *sg;
928
929 if (sdmac->status == DMA_IN_PROGRESS)
930 return NULL;
931 sdmac->status = DMA_IN_PROGRESS;
932
933 sdmac->flags = 0;
934
935 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
936 sg_len, channel);
937
938 sdmac->direction = direction;
939 ret = sdma_load_context(sdmac);
940 if (ret)
941 goto err_out;
942
943 if (sg_len > NUM_BD) {
944 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
945 channel, sg_len, NUM_BD);
946 ret = -EINVAL;
947 goto err_out;
948 }
949
950 for_each_sg(sgl, sg, sg_len, i) {
951 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
952 int param;
953
954 bd->buffer_addr = sg->dma_address;
955
956 count = sg->length;
957
958 if (count > 0xffff) {
959 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
960 channel, count, 0xffff);
961 ret = -EINVAL;
962 goto err_out;
963 }
964
965 bd->mode.count = count;
966
967 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
968 ret = -EINVAL;
969 goto err_out;
970 }
971 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
972 bd->mode.command = 0;
973 else
974 bd->mode.command = sdmac->word_size;
975
976 param = BD_DONE | BD_EXTD | BD_CONT;
977
978 if (sdmac->flags & IMX_DMA_SG_LOOP) {
979 param |= BD_INTR;
980 if (i + 1 == sg_len)
981 param |= BD_WRAP;
982 }
983
984 if (i + 1 == sg_len)
985 param |= BD_INTR;
986
987 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
988 i, count, sg->dma_address,
989 param & BD_WRAP ? "wrap" : "",
990 param & BD_INTR ? " intr" : "");
991
992 bd->mode.status = param;
993 }
994
995 sdmac->num_bd = sg_len;
996 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
997
998 return &sdmac->desc;
999err_out:
1000 return NULL;
1001}
1002
1003static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1004 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1005 size_t period_len, enum dma_data_direction direction)
1006{
1007 struct sdma_channel *sdmac = to_sdma_chan(chan);
1008 struct sdma_engine *sdma = sdmac->sdma;
1009 int num_periods = buf_len / period_len;
1010 int channel = chan->chan_id;
1011 int ret, i = 0, buf = 0;
1012
1013 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1014
1015 if (sdmac->status == DMA_IN_PROGRESS)
1016 return NULL;
1017
1018 sdmac->status = DMA_IN_PROGRESS;
1019
1020 sdmac->flags |= IMX_DMA_SG_LOOP;
1021 sdmac->direction = direction;
1022 ret = sdma_load_context(sdmac);
1023 if (ret)
1024 goto err_out;
1025
1026 if (num_periods > NUM_BD) {
1027 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1028 channel, num_periods, NUM_BD);
1029 goto err_out;
1030 }
1031
1032 if (period_len > 0xffff) {
1033 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
1034 channel, period_len, 0xffff);
1035 goto err_out;
1036 }
1037
1038 while (buf < buf_len) {
1039 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1040 int param;
1041
1042 bd->buffer_addr = dma_addr;
1043
1044 bd->mode.count = period_len;
1045
1046 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1047 goto err_out;
1048 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1049 bd->mode.command = 0;
1050 else
1051 bd->mode.command = sdmac->word_size;
1052
1053 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1054 if (i + 1 == num_periods)
1055 param |= BD_WRAP;
1056
1057 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1058 i, period_len, dma_addr,
1059 param & BD_WRAP ? "wrap" : "",
1060 param & BD_INTR ? " intr" : "");
1061
1062 bd->mode.status = param;
1063
1064 dma_addr += period_len;
1065 buf += period_len;
1066
1067 i++;
1068 }
1069
1070 sdmac->num_bd = num_periods;
1071 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1072
1073 return &sdmac->desc;
1074err_out:
1075 sdmac->status = DMA_ERROR;
1076 return NULL;
1077}
1078
1079static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1080 unsigned long arg)
1081{
1082 struct sdma_channel *sdmac = to_sdma_chan(chan);
1083 struct dma_slave_config *dmaengine_cfg = (void *)arg;
1084
1085 switch (cmd) {
1086 case DMA_TERMINATE_ALL:
1087 sdma_disable_channel(sdmac);
1088 return 0;
1089 case DMA_SLAVE_CONFIG:
1090 if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
1091 sdmac->per_address = dmaengine_cfg->src_addr;
1092 sdmac->watermark_level = dmaengine_cfg->src_maxburst;
1093 sdmac->word_size = dmaengine_cfg->src_addr_width;
1094 } else {
1095 sdmac->per_address = dmaengine_cfg->dst_addr;
1096 sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
1097 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1098 }
1099 return sdma_config_channel(sdmac);
1100 default:
1101 return -ENOSYS;
1102 }
1103
1104 return -EINVAL;
1105}
1106
1107static enum dma_status sdma_tx_status(struct dma_chan *chan,
1108 dma_cookie_t cookie,
1109 struct dma_tx_state *txstate)
1110{
1111 struct sdma_channel *sdmac = to_sdma_chan(chan);
1112 dma_cookie_t last_used;
1113 enum dma_status ret;
1114
1115 last_used = chan->cookie;
1116
1117 ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
1118 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
1119
1120 return ret;
1121}
1122
1123static void sdma_issue_pending(struct dma_chan *chan)
1124{
1125 /*
1126 * Nothing to do. We only have a single descriptor
1127 */
1128}
1129
1130static int __init sdma_init(struct sdma_engine *sdma,
1131 void *ram_code, int ram_code_size)
1132{
1133 int i, ret;
1134 dma_addr_t ccb_phys;
1135
1136 switch (sdma->version) {
1137 case 1:
1138 sdma->num_events = 32;
1139 break;
1140 case 2:
1141 sdma->num_events = 48;
1142 break;
1143 default:
1144 dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
1145 return -ENODEV;
1146 }
1147
1148 clk_enable(sdma->clk);
1149
1150 /* Be sure SDMA has not started yet */
1151 __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
1152
1153 sdma->channel_control = dma_alloc_coherent(NULL,
1154 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1155 sizeof(struct sdma_context_data),
1156 &ccb_phys, GFP_KERNEL);
1157
1158 if (!sdma->channel_control) {
1159 ret = -ENOMEM;
1160 goto err_dma_alloc;
1161 }
1162
1163 sdma->context = (void *)sdma->channel_control +
1164 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1165 sdma->context_phys = ccb_phys +
1166 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1167
1168 /* Zero-out the CCB structures array just allocated */
1169 memset(sdma->channel_control, 0,
1170 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1171
1172 /* disable all channels */
1173 for (i = 0; i < sdma->num_events; i++)
1174 __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
1175
1176 /* All channels have priority 0 */
1177 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1178 __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1179
1180 ret = sdma_request_channel(&sdma->channel[0]);
1181 if (ret)
1182 goto err_dma_alloc;
1183
1184 sdma_config_ownership(&sdma->channel[0], false, true, false);
1185
1186 /* Set Command Channel (Channel Zero) */
1187 __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
1188
1189 /* Set bits of CONFIG register but with static context switching */
1190 /* FIXME: Check whether to set ACR bit depending on clock ratios */
1191 __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
1192
1193 __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1194
1195 /* download the RAM image for SDMA */
1196 sdma_load_script(sdma, ram_code,
1197 ram_code_size,
1198 sdma->script_addrs->ram_code_start_addr);
1199
1200 /* Set bits of CONFIG register with given context switching mode */
1201 __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1202
1203 /* Initializes channel's priorities */
1204 sdma_set_channel_priority(&sdma->channel[0], 7);
1205
1206 clk_disable(sdma->clk);
1207
1208 return 0;
1209
1210err_dma_alloc:
1211 clk_disable(sdma->clk);
1212 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1213 return ret;
1214}
1215
1216static int __init sdma_probe(struct platform_device *pdev)
1217{
1218 int ret;
1219 const struct firmware *fw;
1220 const struct sdma_firmware_header *header;
1221 const struct sdma_script_start_addrs *addr;
1222 int irq;
1223 unsigned short *ram_code;
1224 struct resource *iores;
1225 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1226 char *fwname;
1227 int i;
1228 dma_cap_mask_t mask;
1229 struct sdma_engine *sdma;
1230
1231 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1232 if (!sdma)
1233 return -ENOMEM;
1234
1235 sdma->dev = &pdev->dev;
1236
1237 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1238 irq = platform_get_irq(pdev, 0);
1239 if (!iores || irq < 0 || !pdata) {
1240 ret = -EINVAL;
1241 goto err_irq;
1242 }
1243
1244 if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1245 ret = -EBUSY;
1246 goto err_request_region;
1247 }
1248
1249 sdma->clk = clk_get(&pdev->dev, NULL);
1250 if (IS_ERR(sdma->clk)) {
1251 ret = PTR_ERR(sdma->clk);
1252 goto err_clk;
1253 }
1254
1255 sdma->regs = ioremap(iores->start, resource_size(iores));
1256 if (!sdma->regs) {
1257 ret = -ENOMEM;
1258 goto err_ioremap;
1259 }
1260
1261 ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1262 if (ret)
1263 goto err_request_irq;
1264
1265 fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin",
1266 pdata->cpu_name, pdata->to_version);
1267 if (!fwname) {
1268 ret = -ENOMEM;
1269 goto err_cputype;
1270 }
1271
1272 ret = request_firmware(&fw, fwname, &pdev->dev);
1273 if (ret) {
1274 dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n",
1275 fwname, ret);
1276 kfree(fwname);
1277 goto err_cputype;
1278 }
1279 kfree(fwname);
1280
1281 if (fw->size < sizeof(*header))
1282 goto err_firmware;
1283
1284 header = (struct sdma_firmware_header *)fw->data;
1285
1286 if (header->magic != SDMA_FIRMWARE_MAGIC)
1287 goto err_firmware;
1288 if (header->ram_code_start + header->ram_code_size > fw->size)
1289 goto err_firmware;
1290
1291 addr = (void *)header + header->script_addrs_start;
1292 ram_code = (void *)header + header->ram_code_start;
1293 sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL);
1294 if (!sdma->script_addrs)
1295 goto err_firmware;
1296 memcpy(sdma->script_addrs, addr, sizeof(*addr));
1297
1298 sdma->version = pdata->sdma_version;
1299
1300 INIT_LIST_HEAD(&sdma->dma_device.channels);
1301 /* Initialize channel parameters */
1302 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1303 struct sdma_channel *sdmac = &sdma->channel[i];
1304
1305 sdmac->sdma = sdma;
1306 spin_lock_init(&sdmac->lock);
1307
1308 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1309 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1310
1311 sdmac->chan.device = &sdma->dma_device;
1312 sdmac->chan.chan_id = i;
1313 sdmac->channel = i;
1314
1315 /* Add the channel to the DMAC list */
1316 list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
1317 }
1318
1319 ret = sdma_init(sdma, ram_code, header->ram_code_size);
1320 if (ret)
1321 goto err_init;
1322
1323 sdma->dma_device.dev = &pdev->dev;
1324
1325 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1326 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1327 sdma->dma_device.device_tx_status = sdma_tx_status;
1328 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1329 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1330 sdma->dma_device.device_control = sdma_control;
1331 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1332
1333 ret = dma_async_device_register(&sdma->dma_device);
1334 if (ret) {
1335 dev_err(&pdev->dev, "unable to register\n");
1336 goto err_init;
1337 }
1338
1339 dev_info(&pdev->dev, "initialized (firmware %d.%d)\n",
1340 header->version_major,
1341 header->version_minor);
1342
1343 /* request channel 0. This is an internal control channel
1344 * to the SDMA engine and not available to clients.
1345 */
1346 dma_cap_zero(mask);
1347 dma_cap_set(DMA_SLAVE, mask);
1348 dma_request_channel(mask, NULL, NULL);
1349
1350 release_firmware(fw);
1351
1352 return 0;
1353
1354err_init:
1355 kfree(sdma->script_addrs);
1356err_firmware:
1357 release_firmware(fw);
1358err_cputype:
1359 free_irq(irq, sdma);
1360err_request_irq:
1361 iounmap(sdma->regs);
1362err_ioremap:
1363 clk_put(sdma->clk);
1364err_clk:
1365 release_mem_region(iores->start, resource_size(iores));
1366err_request_region:
1367err_irq:
1368 kfree(sdma);
1369 return ret;
1370}
1371
1372static int __exit sdma_remove(struct platform_device *pdev)
1373{
1374 return -EBUSY;
1375}
1376
1377static struct platform_driver sdma_driver = {
1378 .driver = {
1379 .name = "imx-sdma",
1380 },
1381 .remove = __exit_p(sdma_remove),
1382};
1383
1384static int __init sdma_module_init(void)
1385{
1386 return platform_driver_probe(&sdma_driver, sdma_probe);
1387}
1388subsys_initcall(sdma_module_init);
1389
1390MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1391MODULE_DESCRIPTION("i.MX SDMA driver");
1392MODULE_LICENSE("GPL");
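For orientation, the sketch below shows how a dmaengine client of this era might drive a slave channel exposed by a controller like the one above, using the raw device_* callbacks registered in sdma_probe() (the dmaengine_* convenience wrappers came later). The FIFO address, bus width, burst size and the filter-less channel request are illustrative assumptions, and error unwinding is abbreviated.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative client: push one buffer to an (assumed) peripheral FIFO. */
static int example_slave_tx(void *buf, size_t len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* Real clients pass a filter function to pick the right channel. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
				     (unsigned long)&cfg);

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE) != 1) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc = chan->device->device_prep_slave_sg(chan, &sg, 1, DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
		dma_release_channel(chan);
		return -EIO;
	}

	desc->tx_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the engine */
	return 0;
}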
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c2591e8d9b6e..338bc4eed1f3 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -25,6 +25,7 @@
25 */ 25 */
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/pm_runtime.h>
28#include <linux/intel_mid_dma.h> 29#include <linux/intel_mid_dma.h>
29 30
30#define MAX_CHAN 4 /*max ch across controllers*/ 31#define MAX_CHAN 4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
91 int byte_width = 0, block_ts = 0; 92 int byte_width = 0, block_ts = 0;
92 93
93 switch (tx_width) { 94 switch (tx_width) {
94 case LNW_DMA_WIDTH_8BIT: 95 case DMA_SLAVE_BUSWIDTH_1_BYTE:
95 byte_width = 1; 96 byte_width = 1;
96 break; 97 break;
97 case LNW_DMA_WIDTH_16BIT: 98 case DMA_SLAVE_BUSWIDTH_2_BYTES:
98 byte_width = 2; 99 byte_width = 2;
99 break; 100 break;
100 case LNW_DMA_WIDTH_32BIT: 101 case DMA_SLAVE_BUSWIDTH_4_BYTES:
101 default: 102 default:
102 byte_width = 4; 103 byte_width = 4;
103 break; 104 break;
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
247 struct middma_device *mid = to_middma_device(midc->chan.device); 248 struct middma_device *mid = to_middma_device(midc->chan.device);
248 249
249 /* channel is idle */ 250 /* channel is idle */
250 if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { 251 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
251 /*error*/ 252 /*error*/
252 pr_err("ERR_MDMA: channel is busy in start\n"); 253 pr_err("ERR_MDMA: channel is busy in start\n");
253 /* The tasklet will hopefully advance the queue... */ 254 /* The tasklet will hopefully advance the queue... */
254 return; 255 return;
255 } 256 }
256 257 midc->busy = true;
257 /*write registers and en*/ 258 /*write registers and en*/
258 iowrite32(first->sar, midc->ch_regs + SAR); 259 iowrite32(first->sar, midc->ch_regs + SAR);
259 iowrite32(first->dar, midc->ch_regs + DAR); 260 iowrite32(first->dar, midc->ch_regs + DAR);
261 iowrite32(first->lli_phys, midc->ch_regs + LLP);
260 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 262 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
261 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 263 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
262 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 264 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
264 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 266 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
265 (int)first->sar, (int)first->dar, first->cfg_hi, 267 (int)first->sar, (int)first->dar, first->cfg_hi,
266 first->cfg_lo, first->ctl_hi, first->ctl_lo); 268 first->cfg_lo, first->ctl_hi, first->ctl_lo);
269 first->status = DMA_IN_PROGRESS;
267 270
268 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 271 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
269 first->status = DMA_IN_PROGRESS;
270} 272}
271 273
272/** 274/**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
283{ 285{
284 struct dma_async_tx_descriptor *txd = &desc->txd; 286 struct dma_async_tx_descriptor *txd = &desc->txd;
285 dma_async_tx_callback callback_txd = NULL; 287 dma_async_tx_callback callback_txd = NULL;
288 struct intel_mid_dma_lli *llitem;
286 void *param_txd = NULL; 289 void *param_txd = NULL;
287 290
288 midc->completed = txd->cookie; 291 midc->completed = txd->cookie;
289 callback_txd = txd->callback; 292 callback_txd = txd->callback;
290 param_txd = txd->callback_param; 293 param_txd = txd->callback_param;
291 294
292 list_move(&desc->desc_node, &midc->free_list); 295 if (desc->lli != NULL) {
293 296 /*clear the DONE bit of completed LLI in memory*/
297 llitem = desc->lli + desc->current_lli;
298 llitem->ctl_hi &= CLEAR_DONE;
299 if (desc->current_lli < desc->lli_length-1)
300 (desc->current_lli)++;
301 else
302 desc->current_lli = 0;
303 }
294 spin_unlock_bh(&midc->lock); 304 spin_unlock_bh(&midc->lock);
295 if (callback_txd) { 305 if (callback_txd) {
296 pr_debug("MDMA: TXD callback set ... calling\n"); 306 pr_debug("MDMA: TXD callback set ... calling\n");
297 callback_txd(param_txd); 307 callback_txd(param_txd);
298 spin_lock_bh(&midc->lock); 308 }
299 return; 309 if (midc->raw_tfr) {
310 desc->status = DMA_SUCCESS;
311 if (desc->lli != NULL) {
312 pci_pool_free(desc->lli_pool, desc->lli,
313 desc->lli_phys);
314 pci_pool_destroy(desc->lli_pool);
315 }
316 list_move(&desc->desc_node, &midc->free_list);
317 midc->busy = false;
300 } 318 }
301 spin_lock_bh(&midc->lock); 319 spin_lock_bh(&midc->lock);
302 320
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,
317 335
318 /*tx is complete*/ 336 /*tx is complete*/
319 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 337 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
320 if (desc->status == DMA_IN_PROGRESS) { 338 if (desc->status == DMA_IN_PROGRESS)
321 desc->status = DMA_SUCCESS;
322 midc_descriptor_complete(midc, desc); 339 midc_descriptor_complete(midc, desc);
323 }
324 } 340 }
325 return; 341 return;
326} 342 }
343/**
344 * midc_lli_fill_sg - Helper function to convert
345 * SG list to Linked List Items.
346 * @midc: Channel
347 * @desc: DMA descriptor
348 * @sglist: Pointer to SG list
349 * @sglen: SG list length
350 * @flags: DMA transaction flags
351 *
352 * Walk through the SG list and convert the SG list into Linked
353 * List Items (LLI).
354 */
355static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
356 struct intel_mid_dma_desc *desc,
357 struct scatterlist *sglist,
358 unsigned int sglen,
359 unsigned int flags)
360{
361 struct intel_mid_dma_slave *mids;
362 struct scatterlist *sg;
363 dma_addr_t lli_next, sg_phy_addr;
364 struct intel_mid_dma_lli *lli_bloc_desc;
365 union intel_mid_dma_ctl_lo ctl_lo;
366 union intel_mid_dma_ctl_hi ctl_hi;
367 int i;
327 368
369 pr_debug("MDMA: Entered midc_lli_fill_sg\n");
370 mids = midc->mid_slave;
371
372 lli_bloc_desc = desc->lli;
373 lli_next = desc->lli_phys;
374
375 ctl_lo.ctl_lo = desc->ctl_lo;
376 ctl_hi.ctl_hi = desc->ctl_hi;
377 for_each_sg(sglist, sg, sglen, i) {
378 /*Populate CTL_LOW and LLI values*/
379 if (i != sglen - 1) {
380 lli_next = lli_next +
381 sizeof(struct intel_mid_dma_lli);
382 } else {
383 /*Check for circular list, otherwise terminate LLI to ZERO*/
384 if (flags & DMA_PREP_CIRCULAR_LIST) {
385 pr_debug("MDMA: LLI is configured in circular mode\n");
386 lli_next = desc->lli_phys;
387 } else {
388 lli_next = 0;
389 ctl_lo.ctlx.llp_dst_en = 0;
390 ctl_lo.ctlx.llp_src_en = 0;
391 }
392 }
393 /*Populate CTL_HI values*/
394 ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
395 desc->width,
396 midc->dma->block_size);
397 /*Populate SAR and DAR values*/
398 sg_phy_addr = sg_phys(sg);
399 if (desc->dirn == DMA_TO_DEVICE) {
400 lli_bloc_desc->sar = sg_phy_addr;
401 lli_bloc_desc->dar = mids->dma_slave.dst_addr;
402 } else if (desc->dirn == DMA_FROM_DEVICE) {
403 lli_bloc_desc->sar = mids->dma_slave.src_addr;
404 lli_bloc_desc->dar = sg_phy_addr;
405 }
406 /*Copy values into block descriptor in system memory*/
407 lli_bloc_desc->llp = lli_next;
408 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
409 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
410
411 lli_bloc_desc++;
412 }
413 /*Copy very first LLI values to descriptor*/
414 desc->ctl_lo = desc->lli->ctl_lo;
415 desc->ctl_hi = desc->lli->ctl_hi;
416 desc->sar = desc->lli->sar;
417 desc->dar = desc->lli->dar;
418
419 return 0;
420}
328/***************************************************************************** 421/*****************************************************************************
329DMA engine callback Functions*/ 422DMA engine callback Functions*/
330/** 423/**
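As a mental model for what midc_lli_fill_sg() above produces, the stand-alone sketch below chains simplified linked-list items the same way: every scatterlist segment gets one item, each item's llp field holds the bus address of the next item, and the last item either loops back to the first (circular mode) or carries a zero link. The struct here is a deliberately simplified stand-in, not the driver's packed intel_mid_dma_lli layout.

#include <stdio.h>

/* Simplified stand-in for a hardware linked-list item (LLI). */
struct lli {
	unsigned int sar;	/* source address of this segment */
	unsigned int dar;	/* destination address of this segment */
	unsigned int llp;	/* bus address of the next LLI, 0 = end */
};

/* Chain n segments that all target one peripheral FIFO address. */
static void fill_chain(struct lli *lli, unsigned int lli_bus_addr,
		       const unsigned int *seg, int n,
		       unsigned int fifo, int circular)
{
	int i;

	for (i = 0; i < n; i++) {
		lli[i].sar = seg[i];
		lli[i].dar = fifo;
		if (i != n - 1)
			lli[i].llp = lli_bus_addr + (i + 1) * sizeof(*lli);
		else
			lli[i].llp = circular ? lli_bus_addr : 0;
	}
}

int main(void)
{
	const unsigned int segs[3] = { 0x1000, 0x2000, 0x3000 };
	struct lli chain[3];
	int i;

	fill_chain(chain, 0x80000000u, segs, 3, 0xfff0c000u, 0);
	for (i = 0; i < 3; i++)
		printf("lli[%d]: sar=%#x dar=%#x llp=%#x\n",
		       i, chain[i].sar, chain[i].dar, chain[i].llp);
	return 0;
}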
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
349 desc->txd.cookie = cookie; 442 desc->txd.cookie = cookie;
350 443
351 444
352 if (list_empty(&midc->active_list)) { 445 if (list_empty(&midc->active_list))
353 midc_dostart(midc, desc);
354 list_add_tail(&desc->desc_node, &midc->active_list); 446 list_add_tail(&desc->desc_node, &midc->active_list);
355 } else { 447 else
356 list_add_tail(&desc->desc_node, &midc->queue); 448 list_add_tail(&desc->desc_node, &midc->queue);
357 } 449
450 midc_dostart(midc, desc);
358 spin_unlock_bh(&midc->lock); 451 spin_unlock_bh(&midc->lock);
359 452
360 return cookie; 453 return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
414 return ret; 507 return ret;
415} 508}
416 509
510static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
511{
512 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
513 struct dma_slave_config *slave = (struct dma_slave_config *)arg;
514 struct intel_mid_dma_slave *mid_slave;
515
516 BUG_ON(!midc);
517 BUG_ON(!slave);
518 pr_debug("MDMA: slave control called\n");
519
520 mid_slave = to_intel_mid_dma_slave(slave);
521
522 BUG_ON(!mid_slave);
523
524 midc->mid_slave = mid_slave;
525 return 0;
526}
417/** 527/**
418 * intel_mid_dma_device_control - DMA device control 528 * intel_mid_dma_device_control - DMA device control
419 * @chan: chan for DMA control 529 * @chan: chan for DMA control
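Because dma_slave_control() above recovers the driver-private wrapper with container_of(), a client has to hand in the dma_slave_config that is embedded inside its struct intel_mid_dma_slave rather than a free-standing one. A hedged sketch of what a caller is expected to do; the handshake/mode constants and the field values are illustrative assumptions about intel_mid_dma.h, not taken from this hunk.

#include <linux/dmaengine.h>
#include <linux/intel_mid_dma.h>

/* Illustrative: configure a channel for memory-to-peripheral transfers. */
static int example_configure(struct dma_chan *chan,
			     struct intel_mid_dma_slave *mid,
			     dma_addr_t fifo_addr)
{
	mid->hs_mode  = LNW_DMA_HW_HS;		/* assumed handshake constant */
	mid->cfg_mode = LNW_DMA_MEM_TO_PER;	/* assumed mode constant */

	mid->dma_slave.direction	= DMA_TO_DEVICE;
	mid->dma_slave.dst_addr		= fifo_addr;
	mid->dma_slave.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES;
	mid->dma_slave.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES;
	mid->dma_slave.dst_maxburst	= 8;
	mid->dma_slave.src_maxburst	= 8;

	/*
	 * Pass the *embedded* dma_slave_config; dma_slave_control() will
	 * container_of() its way back to the intel_mid_dma_slave wrapper.
	 */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&mid->dma_slave);
}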
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
428 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 538 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
429 struct middma_device *mid = to_middma_device(chan->device); 539 struct middma_device *mid = to_middma_device(chan->device);
430 struct intel_mid_dma_desc *desc, *_desc; 540 struct intel_mid_dma_desc *desc, *_desc;
431 LIST_HEAD(list); 541 union intel_mid_dma_cfg_lo cfg_lo;
542
543 if (cmd == DMA_SLAVE_CONFIG)
544 return dma_slave_control(chan, arg);
432 545
433 if (cmd != DMA_TERMINATE_ALL) 546 if (cmd != DMA_TERMINATE_ALL)
434 return -ENXIO; 547 return -ENXIO;
435 548
436 spin_lock_bh(&midc->lock); 549 spin_lock_bh(&midc->lock);
437 if (midc->in_use == false) { 550 if (midc->busy == false) {
438 spin_unlock_bh(&midc->lock); 551 spin_unlock_bh(&midc->lock);
439 return 0; 552 return 0;
440 } 553 }
441 list_splice_init(&midc->free_list, &list); 554 /*Suspend and disable the channel*/
442 midc->descs_allocated = 0; 555 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
443 midc->slave = NULL; 556 cfg_lo.cfgx.ch_susp = 1;
444 557 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
558 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
559 midc->busy = false;
445 /* Disable interrupts */ 560 /* Disable interrupts */
446 disable_dma_interrupt(midc); 561 disable_dma_interrupt(midc);
562 midc->descs_allocated = 0;
447 563
448 spin_unlock_bh(&midc->lock); 564 spin_unlock_bh(&midc->lock);
449 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 565 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
450 pr_debug("MDMA: freeing descriptor %p\n", desc); 566 if (desc->lli != NULL) {
451 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 567 pci_pool_free(desc->lli_pool, desc->lli,
568 desc->lli_phys);
569 pci_pool_destroy(desc->lli_pool);
570 }
571 list_move(&desc->desc_node, &midc->free_list);
452 } 572 }
453 return 0; 573 return 0;
454} 574}
455 575
456/**
457 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
458 * @chan: chan for DMA transfer
459 * @sgl: scatter gather list
460 * @sg_len: length of sg txn
461 * @direction: DMA transfer dirtn
462 * @flags: DMA flags
463 *
464 * Do DMA sg txn: NOT supported now
465 */
466static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
467 struct dma_chan *chan, struct scatterlist *sgl,
468 unsigned int sg_len, enum dma_data_direction direction,
469 unsigned long flags)
470{
471 /*not supported now*/
472 return NULL;
473}
474 576
475/** 577/**
476 * intel_mid_dma_prep_memcpy - Prep memcpy txn 578 * intel_mid_dma_prep_memcpy - Prep memcpy txn
@@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
495 union intel_mid_dma_ctl_hi ctl_hi; 597 union intel_mid_dma_ctl_hi ctl_hi;
496 union intel_mid_dma_cfg_lo cfg_lo; 598 union intel_mid_dma_cfg_lo cfg_lo;
497 union intel_mid_dma_cfg_hi cfg_hi; 599 union intel_mid_dma_cfg_hi cfg_hi;
498 enum intel_mid_dma_width width = 0; 600 enum dma_slave_buswidth width;
499 601
500 pr_debug("MDMA: Prep for memcpy\n"); 602 pr_debug("MDMA: Prep for memcpy\n");
501 WARN_ON(!chan); 603 BUG_ON(!chan);
502 if (!len) 604 if (!len)
503 return NULL; 605 return NULL;
504 606
505 mids = chan->private;
506 WARN_ON(!mids);
507
508 midc = to_intel_mid_dma_chan(chan); 607 midc = to_intel_mid_dma_chan(chan);
509 WARN_ON(!midc); 608 BUG_ON(!midc);
609
610 mids = midc->mid_slave;
611 BUG_ON(!mids);
510 612
511 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 613 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
512 midc->dma->pci_id, midc->ch_id, len); 614 midc->dma->pci_id, midc->ch_id, len);
513 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 615 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
514 mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); 616 mids->cfg_mode, mids->dma_slave.direction,
617 mids->hs_mode, mids->dma_slave.src_addr_width);
515 618
516 /*calculate CFG_LO*/ 619 /*calculate CFG_LO*/
517 if (mids->hs_mode == LNW_DMA_SW_HS) { 620 if (mids->hs_mode == LNW_DMA_SW_HS) {
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
530 if (midc->dma->pimr_mask) { 633 if (midc->dma->pimr_mask) {
531 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 634 cfg_hi.cfgx.protctl = 0x0; /*default value*/
532 cfg_hi.cfgx.fifo_mode = 1; 635 cfg_hi.cfgx.fifo_mode = 1;
533 if (mids->dirn == DMA_TO_DEVICE) { 636 if (mids->dma_slave.direction == DMA_TO_DEVICE) {
534 cfg_hi.cfgx.src_per = 0; 637 cfg_hi.cfgx.src_per = 0;
535 if (mids->device_instance == 0) 638 if (mids->device_instance == 0)
536 cfg_hi.cfgx.dst_per = 3; 639 cfg_hi.cfgx.dst_per = 3;
537 if (mids->device_instance == 1) 640 if (mids->device_instance == 1)
538 cfg_hi.cfgx.dst_per = 1; 641 cfg_hi.cfgx.dst_per = 1;
539 } else if (mids->dirn == DMA_FROM_DEVICE) { 642 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
540 if (mids->device_instance == 0) 643 if (mids->device_instance == 0)
541 cfg_hi.cfgx.src_per = 2; 644 cfg_hi.cfgx.src_per = 2;
542 if (mids->device_instance == 1) 645 if (mids->device_instance == 1)
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
552 655
553 /*calculate CTL_HI*/ 656 /*calculate CTL_HI*/
554 ctl_hi.ctlx.reser = 0; 657 ctl_hi.ctlx.reser = 0;
555 width = mids->src_width; 658 ctl_hi.ctlx.done = 0;
659 width = mids->dma_slave.src_addr_width;
556 660
557 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 661 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
558 pr_debug("MDMA:calc len %d for block size %d\n", 662 pr_debug("MDMA:calc len %d for block size %d\n",
@@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
560 /*calculate CTL_LO*/ 664 /*calculate CTL_LO*/
561 ctl_lo.ctl_lo = 0; 665 ctl_lo.ctl_lo = 0;
562 ctl_lo.ctlx.int_en = 1; 666 ctl_lo.ctlx.int_en = 1;
563 ctl_lo.ctlx.dst_tr_width = mids->dst_width; 667 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
564 ctl_lo.ctlx.src_tr_width = mids->src_width; 668 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
565 ctl_lo.ctlx.dst_msize = mids->src_msize; 669 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
566 ctl_lo.ctlx.src_msize = mids->dst_msize; 670 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
567 671
568 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 672 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
569 ctl_lo.ctlx.tt_fc = 0; 673 ctl_lo.ctlx.tt_fc = 0;
570 ctl_lo.ctlx.sinc = 0; 674 ctl_lo.ctlx.sinc = 0;
571 ctl_lo.ctlx.dinc = 0; 675 ctl_lo.ctlx.dinc = 0;
572 } else { 676 } else {
573 if (mids->dirn == DMA_TO_DEVICE) { 677 if (mids->dma_slave.direction == DMA_TO_DEVICE) {
574 ctl_lo.ctlx.sinc = 0; 678 ctl_lo.ctlx.sinc = 0;
575 ctl_lo.ctlx.dinc = 2; 679 ctl_lo.ctlx.dinc = 2;
576 ctl_lo.ctlx.tt_fc = 1; 680 ctl_lo.ctlx.tt_fc = 1;
577 } else if (mids->dirn == DMA_FROM_DEVICE) { 681 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
578 ctl_lo.ctlx.sinc = 2; 682 ctl_lo.ctlx.sinc = 2;
579 ctl_lo.ctlx.dinc = 0; 683 ctl_lo.ctlx.dinc = 0;
580 ctl_lo.ctlx.tt_fc = 2; 684 ctl_lo.ctlx.tt_fc = 2;
@@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
597 desc->ctl_lo = ctl_lo.ctl_lo; 701 desc->ctl_lo = ctl_lo.ctl_lo;
598 desc->ctl_hi = ctl_hi.ctl_hi; 702 desc->ctl_hi = ctl_hi.ctl_hi;
599 desc->width = width; 703 desc->width = width;
600 desc->dirn = mids->dirn; 704 desc->dirn = mids->dma_slave.direction;
705 desc->lli_phys = 0;
706 desc->lli = NULL;
707 desc->lli_pool = NULL;
601 return &desc->txd; 708 return &desc->txd;
602 709
603err_desc_get: 710err_desc_get:
@@ -605,6 +712,85 @@ err_desc_get:
605 midc_desc_put(midc, desc); 712 midc_desc_put(midc, desc);
606 return NULL; 713 return NULL;
607} 714}
715/**
716 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
717 * @chan: chan for DMA transfer
718 * @sgl: scatter gather list
719 * @sg_len: length of sg txn
720 * @direction: DMA transfer dirtn
721 * @flags: DMA flags
722 *
723 * Prepares LLI-based peripheral transfer
724 */
725static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
726 struct dma_chan *chan, struct scatterlist *sgl,
727 unsigned int sg_len, enum dma_data_direction direction,
728 unsigned long flags)
729{
730 struct intel_mid_dma_chan *midc = NULL;
731 struct intel_mid_dma_slave *mids = NULL;
732 struct intel_mid_dma_desc *desc = NULL;
733 struct dma_async_tx_descriptor *txd = NULL;
734 union intel_mid_dma_ctl_lo ctl_lo;
735
736 pr_debug("MDMA: Prep for slave SG\n");
737
738 if (!sg_len) {
739 pr_err("MDMA: Invalid SG length\n");
740 return NULL;
741 }
742 midc = to_intel_mid_dma_chan(chan);
743 BUG_ON(!midc);
744
745 mids = midc->mid_slave;
746 BUG_ON(!mids);
747
748 if (!midc->dma->pimr_mask) {
749 pr_debug("MDMA: SG list is not supported by this controller\n");
750 return NULL;
751 }
752
753 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
754 sg_len, direction, flags);
755
756 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
757 if (NULL == txd) {
758 pr_err("MDMA: Prep memcpy failed\n");
759 return NULL;
760 }
761 desc = to_intel_mid_dma_desc(txd);
762 desc->dirn = direction;
763 ctl_lo.ctl_lo = desc->ctl_lo;
764 ctl_lo.ctlx.llp_dst_en = 1;
765 ctl_lo.ctlx.llp_src_en = 1;
766 desc->ctl_lo = ctl_lo.ctl_lo;
767 desc->lli_length = sg_len;
768 desc->current_lli = 0;
769 /* DMA coherent memory pool for LLI descriptors*/
770 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
771 midc->dma->pdev,
772 (sizeof(struct intel_mid_dma_lli)*sg_len),
773 32, 0);
774 if (NULL == desc->lli_pool) {
775 pr_err("MID_DMA:LLI pool create failed\n");
776 return NULL;
777 }
778
779 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
780 if (!desc->lli) {
781 pr_err("MID_DMA: LLI alloc failed\n");
782 pci_pool_destroy(desc->lli_pool);
783 return NULL;
784 }
785
786 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
787 if (flags & DMA_PREP_INTERRUPT) {
788 iowrite32(UNMASK_INTR_REG(midc->ch_id),
789 midc->dma_base + MASK_BLOCK);
790 pr_debug("MDMA:Enabled Block interrupt\n");
791 }
792 return &desc->txd;
793}
608 794
609/** 795/**
610 * intel_mid_dma_free_chan_resources - Frees dma resources 796 * intel_mid_dma_free_chan_resources - Frees dma resources
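The DMA_PREP_CIRCULAR_LIST handling above is what lets a client keep a peripheral fed from a ring: the final LLI's link points back at the first one instead of terminating. A minimal sketch of preparing such a ring from two halves of one buffer; the two-entry layout and DMA_TO_DEVICE direction are illustrative, and buffer mapping/cleanup are omitted.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative: build a circular two-segment chain over one ring buffer. */
static struct dma_async_tx_descriptor *
example_prep_ring(struct dma_chan *chan, u8 *ring, unsigned int half_len)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], ring, half_len);
	sg_set_buf(&sg[1], ring + half_len, half_len);

	/*
	 * DMA_PREP_CIRCULAR_LIST makes midc_lli_fill_sg() link the last LLI
	 * back to the first; DMA_PREP_INTERRUPT unmasks the block interrupt
	 * so the completion callback can run as each segment finishes.
	 */
	return chan->device->device_prep_slave_sg(chan, sg, 2, DMA_TO_DEVICE,
						  DMA_PREP_CIRCULAR_LIST |
						  DMA_PREP_INTERRUPT);
}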
@@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
618 struct middma_device *mid = to_middma_device(chan->device); 804 struct middma_device *mid = to_middma_device(chan->device);
619 struct intel_mid_dma_desc *desc, *_desc; 805 struct intel_mid_dma_desc *desc, *_desc;
620 806
621 if (true == midc->in_use) { 807 if (true == midc->busy) {
622 /*trying to free ch in use!!!!!*/ 808 /*trying to free ch in use!!!!!*/
623 pr_err("ERR_MDMA: trying to free ch in use\n"); 809 pr_err("ERR_MDMA: trying to free ch in use\n");
624 } 810 }
625 811 pm_runtime_put(&mid->pdev->dev);
626 spin_lock_bh(&midc->lock); 812 spin_lock_bh(&midc->lock);
627 midc->descs_allocated = 0; 813 midc->descs_allocated = 0;
628 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 814 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
639 } 825 }
640 spin_unlock_bh(&midc->lock); 826 spin_unlock_bh(&midc->lock);
641 midc->in_use = false; 827 midc->in_use = false;
828 midc->busy = false;
642 /* Disable CH interrupts */ 829 /* Disable CH interrupts */
643 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 830 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
644 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 831 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
659 dma_addr_t phys; 846 dma_addr_t phys;
660 int i = 0; 847 int i = 0;
661 848
849 pm_runtime_get_sync(&mid->pdev->dev);
850
851 if (mid->state == SUSPENDED) {
852 if (dma_resume(mid->pdev)) {
853 pr_err("ERR_MDMA: resume failed");
854 return -EFAULT;
855 }
856 }
662 857
663 /* ASSERT: channel is idle */ 858 /* ASSERT: channel is idle */
664 if (test_ch_en(mid->dma_base, midc->ch_id)) { 859 if (test_ch_en(mid->dma_base, midc->ch_id)) {
665 /*ch is not idle*/ 860 /*ch is not idle*/
666 pr_err("ERR_MDMA: ch not idle\n"); 861 pr_err("ERR_MDMA: ch not idle\n");
862 pm_runtime_put(&mid->pdev->dev);
667 return -EIO; 863 return -EIO;
668 } 864 }
669 midc->completed = chan->cookie = 1; 865 midc->completed = chan->cookie = 1;
@@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
674 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 870 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
675 if (!desc) { 871 if (!desc) {
676 pr_err("ERR_MDMA: desc failed\n"); 872 pr_err("ERR_MDMA: desc failed\n");
873 pm_runtime_put(&mid->pdev->dev);
677 return -ENOMEM; 874 return -ENOMEM;
678 /*check*/ 875 /*check*/
679 } 876 }
@@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
686 list_add_tail(&desc->desc_node, &midc->free_list); 883 list_add_tail(&desc->desc_node, &midc->free_list);
687 } 884 }
688 spin_unlock_bh(&midc->lock); 885 spin_unlock_bh(&midc->lock);
689 midc->in_use = false; 886 midc->in_use = true;
887 midc->busy = false;
690 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 888 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
691 return i; 889 return i;
692} 890}
@@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
715{ 913{
716 struct middma_device *mid = NULL; 914 struct middma_device *mid = NULL;
717 struct intel_mid_dma_chan *midc = NULL; 915 struct intel_mid_dma_chan *midc = NULL;
718 u32 status; 916 u32 status, raw_tfr, raw_block;
719 int i; 917 int i;
720 918
721 mid = (struct middma_device *)data; 919 mid = (struct middma_device *)data;
@@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data)
724 return; 922 return;
725 } 923 }
726 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 924 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
727 status = ioread32(mid->dma_base + RAW_TFR); 925 raw_tfr = ioread32(mid->dma_base + RAW_TFR);
728 pr_debug("MDMA:RAW_TFR %x\n", status); 926 raw_block = ioread32(mid->dma_base + RAW_BLOCK);
927 status = raw_tfr | raw_block;
729 status &= mid->intr_mask; 928 status &= mid->intr_mask;
730 while (status) { 929 while (status) {
731 /*txn interrupt*/ 930 /*txn interrupt*/
@@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data)
741 } 940 }
742 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 941 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
743 status, midc->ch_id, i); 942 status, midc->ch_id, i);
943 midc->raw_tfr = raw_tfr;
944 midc->raw_block = raw_block;
945 spin_lock_bh(&midc->lock);
744 /*clearing this interrupts first*/ 946 /*clearing this interrupts first*/
745 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 947 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
746 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); 948 if (raw_block) {
747 949 iowrite32((1 << midc->ch_id),
748 spin_lock_bh(&midc->lock); 950 mid->dma_base + CLEAR_BLOCK);
951 }
749 midc_scan_descriptors(mid, midc); 952 midc_scan_descriptors(mid, midc);
750 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 953 pr_debug("MDMA:Scan of desc... complete, unmasking\n");
751 iowrite32(UNMASK_INTR_REG(midc->ch_id), 954 iowrite32(UNMASK_INTR_REG(midc->ch_id),
752 mid->dma_base + MASK_TFR); 955 mid->dma_base + MASK_TFR);
956 if (raw_block) {
957 iowrite32(UNMASK_INTR_REG(midc->ch_id),
958 mid->dma_base + MASK_BLOCK);
959 }
753 spin_unlock_bh(&midc->lock); 960 spin_unlock_bh(&midc->lock);
754 } 961 }
755 962
@@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
804static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1011static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
805{ 1012{
806 struct middma_device *mid = data; 1013 struct middma_device *mid = data;
807 u32 status; 1014 u32 tfr_status, err_status;
808 int call_tasklet = 0; 1015 int call_tasklet = 0;
809 1016
1017 tfr_status = ioread32(mid->dma_base + RAW_TFR);
1018 err_status = ioread32(mid->dma_base + RAW_ERR);
1019 if (!tfr_status && !err_status)
1020 return IRQ_NONE;
1021
810 /*DMA Interrupt*/ 1022 /*DMA Interrupt*/
811 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1023 pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
812 if (!mid) { 1024 if (!mid) {
@@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
814 return -EINVAL; 1026 return -EINVAL;
815 } 1027 }
816 1028
817 status = ioread32(mid->dma_base + RAW_TFR); 1029 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
818 pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); 1030 tfr_status &= mid->intr_mask;
819 status &= mid->intr_mask; 1031 if (tfr_status) {
820 if (status) {
821 /*need to disable intr*/ 1032 /*need to disable intr*/
822 iowrite32((status << 8), mid->dma_base + MASK_TFR); 1033 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
823 pr_debug("MDMA: Calling tasklet %x\n", status); 1034 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
1035 pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
824 call_tasklet = 1; 1036 call_tasklet = 1;
825 } 1037 }
826 status = ioread32(mid->dma_base + RAW_ERR); 1038 err_status &= mid->intr_mask;
827 status &= mid->intr_mask; 1039 if (err_status) {
828 if (status) { 1040 iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
829 iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
830 call_tasklet = 1; 1041 call_tasklet = 1;
831 } 1042 }
832 if (call_tasklet) 1043 if (call_tasklet)
@@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
856{ 1067{
857 struct middma_device *dma = pci_get_drvdata(pdev); 1068 struct middma_device *dma = pci_get_drvdata(pdev);
858 int err, i; 1069 int err, i;
859 unsigned int irq_level;
860 1070
861 /* DMA coherent memory pool for DMA descriptor allocations */ 1071 /* DMA coherent memory pool for DMA descriptor allocations */
862 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1072 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
@@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
884 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1094 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
885 /*init CH structures*/ 1095 /*init CH structures*/
886 dma->intr_mask = 0; 1096 dma->intr_mask = 0;
1097 dma->state = RUNNING;
887 for (i = 0; i < dma->max_chan; i++) { 1098 for (i = 0; i < dma->max_chan; i++) {
888 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1099 struct intel_mid_dma_chan *midch = &dma->ch[i];
889 1100
@@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
943 1154
944 /*register irq */ 1155 /*register irq */
945 if (dma->pimr_mask) { 1156 if (dma->pimr_mask) {
946 irq_level = IRQF_SHARED;
947 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1157 pr_debug("MDMA:Requesting irq shared for DMAC1\n");
948 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1158 err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
949 IRQF_SHARED, "INTEL_MID_DMAC1", dma); 1159 IRQF_SHARED, "INTEL_MID_DMAC1", dma);
@@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
951 goto err_irq; 1161 goto err_irq;
952 } else { 1162 } else {
953 dma->intr_mask = 0x03; 1163 dma->intr_mask = 0x03;
954 irq_level = 0;
955 pr_debug("MDMA:Requesting irq for DMAC2\n"); 1164 pr_debug("MDMA:Requesting irq for DMAC2\n");
956 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1165 err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
957 0, "INTEL_MID_DMAC2", dma); 1166 IRQF_SHARED, "INTEL_MID_DMAC2", dma);
958 if (0 != err) 1167 if (0 != err)
959 goto err_irq; 1168 goto err_irq;
960 } 1169 }
@@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
1070 if (err) 1279 if (err)
1071 goto err_dma; 1280 goto err_dma;
1072 1281
1282 pm_runtime_set_active(&pdev->dev);
1283 pm_runtime_enable(&pdev->dev);
1284 pm_runtime_allow(&pdev->dev);
1073 return 0; 1285 return 0;
1074 1286
1075err_dma: 1287err_dma:
@@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
1104 pci_disable_device(pdev); 1316 pci_disable_device(pdev);
1105} 1317}
1106 1318
1319/* Power Management */
1320/**
1321 * dma_suspend - PCI suspend function
1322 *
1323 * @pci: PCI device structure
1324 * @state: PM message
1325 *
1326 * This function is called by OS when a power event occurs
1327 */
1328int dma_suspend(struct pci_dev *pci, pm_message_t state)
1329{
1330 int i;
1331 struct middma_device *device = pci_get_drvdata(pci);
1332 pr_debug("MDMA: dma_suspend called\n");
1333
1334 for (i = 0; i < device->max_chan; i++) {
1335 if (device->ch[i].in_use)
1336 return -EAGAIN;
1337 }
1338 device->state = SUSPENDED;
1339 pci_set_drvdata(pci, device);
1340 pci_save_state(pci);
1341 pci_disable_device(pci);
1342 pci_set_power_state(pci, PCI_D3hot);
1343 return 0;
1344}
1345
1346/**
1347 * dma_resume - PCI resume function
1348 *
1349 * @pci: PCI device structure
1350 *
1351 * This function is called by OS when a power event occurs
1352 */
1353int dma_resume(struct pci_dev *pci)
1354{
1355 int ret;
1356 struct middma_device *device = pci_get_drvdata(pci);
1357
1358 pr_debug("MDMA: dma_resume called\n");
1359 pci_set_power_state(pci, PCI_D0);
1360 pci_restore_state(pci);
1361 ret = pci_enable_device(pci);
1362 if (ret) {
1363 pr_err("MDMA: device can't be enabled for %x\n", pci->device);
1364 return ret;
1365 }
1366 device->state = RUNNING;
1367 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1368 pci_set_drvdata(pci, device);
1369 return 0;
1370}
1371
1372static int dma_runtime_suspend(struct device *dev)
1373{
1374 struct pci_dev *pci_dev = to_pci_dev(dev);
1375 return dma_suspend(pci_dev, PMSG_SUSPEND);
1376}
1377
1378static int dma_runtime_resume(struct device *dev)
1379{
1380 struct pci_dev *pci_dev = to_pci_dev(dev);
1381 return dma_resume(pci_dev);
1382}
1383
1384static int dma_runtime_idle(struct device *dev)
1385{
1386 struct pci_dev *pdev = to_pci_dev(dev);
1387 struct middma_device *device = pci_get_drvdata(pdev);
1388 int i;
1389
1390 for (i = 0; i < device->max_chan; i++) {
1391 if (device->ch[i].in_use)
1392 return -EAGAIN;
1393 }
1394
1395 return pm_schedule_suspend(dev, 0);
1396}
1397
1107/****************************************************************************** 1398/******************************************************************************
1108* PCI stuff 1399* PCI stuff
1109*/ 1400*/
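The runtime-PM wiring above follows the usual reference-counted pattern: every allocated channel takes a reference with pm_runtime_get_sync() and drops it with pm_runtime_put(), while the idle callback refuses to suspend as long as any channel is still marked in_use. A generic sketch of that pattern (not this driver's code) for reference:

#include <linux/pm_runtime.h>

/*
 * Generic sketch: hold a runtime-PM reference for as long as a resource
 * (here: a DMA channel) is allocated to a client.
 */
static int example_channel_open(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resumes the device if needed */
	if (ret < 0) {
		pm_runtime_put(dev);	/* balance the failed get */
		return ret;
	}
	return 0;
}

static void example_channel_close(struct device *dev)
{
	pm_runtime_put(dev);	/* last put lets runtime suspend happen */
}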
@@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
1116}; 1407};
1117MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1408MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
1118 1409
1410static const struct dev_pm_ops intel_mid_dma_pm = {
1411 .runtime_suspend = dma_runtime_suspend,
1412 .runtime_resume = dma_runtime_resume,
1413 .runtime_idle = dma_runtime_idle,
1414};
1415
1119static struct pci_driver intel_mid_dma_pci = { 1416static struct pci_driver intel_mid_dma_pci = {
1120 .name = "Intel MID DMA", 1417 .name = "Intel MID DMA",
1121 .id_table = intel_mid_dma_ids, 1418 .id_table = intel_mid_dma_ids,
1122 .probe = intel_mid_dma_probe, 1419 .probe = intel_mid_dma_probe,
1123 .remove = __devexit_p(intel_mid_dma_remove), 1420 .remove = __devexit_p(intel_mid_dma_remove),
1421#ifdef CONFIG_PM
1422 .suspend = dma_suspend,
1423 .resume = dma_resume,
1424 .driver = {
1425 .pm = &intel_mid_dma_pm,
1426 },
1427#endif
1124}; 1428};
1125 1429
1126static int __init intel_mid_dma_init(void) 1430static int __init intel_mid_dma_init(void)
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index d81aa658ab09..709fecbdde79 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -29,11 +29,12 @@
29#include <linux/dmapool.h> 29#include <linux/dmapool.h>
30#include <linux/pci_ids.h> 30#include <linux/pci_ids.h>
31 31
32#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" 32#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
33 33
34#define REG_BIT0 0x00000001 34#define REG_BIT0 0x00000001
35#define REG_BIT8 0x00000100 35#define REG_BIT8 0x00000100
36 36#define INT_MASK_WE 0x8
37#define CLEAR_DONE 0xFFFFEFFF
37#define UNMASK_INTR_REG(chan_num) \ 38#define UNMASK_INTR_REG(chan_num) \
38 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) 39 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
39#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) 40#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
@@ -41,6 +42,9 @@
41#define ENABLE_CHANNEL(chan_num) \ 42#define ENABLE_CHANNEL(chan_num) \
42 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) 43 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
43 44
45#define DISABLE_CHANNEL(chan_num) \
46 (REG_BIT8 << chan_num)
47
44#define DESCS_PER_CHANNEL 16 48#define DESCS_PER_CHANNEL 16
45/*DMA Registers*/ 49/*DMA Registers*/
46/*registers associated with channel programming*/ 50/*registers associated with channel programming*/
@@ -50,6 +54,7 @@
50/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ 54/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
51#define SAR 0x00 /* Source Address Register*/ 55#define SAR 0x00 /* Source Address Register*/
52#define DAR 0x08 /* Destination Address Register*/ 56#define DAR 0x08 /* Destination Address Register*/
57#define LLP 0x10 /* Linked List Pointer Register*/
53#define CTL_LOW 0x18 /* Control Register*/ 58#define CTL_LOW 0x18 /* Control Register*/
54#define CTL_HIGH 0x1C /* Control Register*/ 59#define CTL_HIGH 0x1C /* Control Register*/
55#define CFG_LOW 0x40 /* Configuration Register Low*/ 60#define CFG_LOW 0x40 /* Configuration Register Low*/
@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
112union intel_mid_dma_ctl_hi { 117union intel_mid_dma_ctl_hi {
113 struct { 118 struct {
114 u32 block_ts:12; /*block transfer size*/ 119 u32 block_ts:12; /*block transfer size*/
115 /*configured by DMAC*/ 120 u32 done:1; /*Done - updated by DMAC*/
116 u32 reser:20; 121 u32 reser:19; /*configured by DMAC*/
117 } ctlx; 122 } ctlx;
118 u32 ctl_hi; 123 u32 ctl_hi;
119 124
@@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi {
152 u32 cfg_hi; 157 u32 cfg_hi;
153}; 158};
154 159
160
155/** 161/**
156 * struct intel_mid_dma_chan - internal mid representation of a DMA channel 162 * struct intel_mid_dma_chan - internal mid representation of a DMA channel
157 * @chan: dma_chan strcture represetation for mid chan 163 * @chan: dma_chan strcture represetation for mid chan
@@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi {
166 * @slave: dma slave struture 172 * @slave: dma slave struture
167 * @descs_allocated: total number of decsiptors allocated 173 * @descs_allocated: total number of decsiptors allocated
168 * @dma: dma device struture pointer 174 * @dma: dma device struture pointer
175 * @busy: bool representing if ch is busy (active txn) or not
169 * @in_use: bool representing if ch is in use or not 176 * @in_use: bool representing if ch is in use or not
177 * @raw_tfr: raw tfr interrupt received
178 * @raw_block: raw block interrupt received
170 */ 179 */
171struct intel_mid_dma_chan { 180struct intel_mid_dma_chan {
172 struct dma_chan chan; 181 struct dma_chan chan;
@@ -178,10 +187,13 @@ struct intel_mid_dma_chan {
178 struct list_head active_list; 187 struct list_head active_list;
179 struct list_head queue; 188 struct list_head queue;
180 struct list_head free_list; 189 struct list_head free_list;
181 struct intel_mid_dma_slave *slave;
182 unsigned int descs_allocated; 190 unsigned int descs_allocated;
183 struct middma_device *dma; 191 struct middma_device *dma;
192 bool busy;
184 bool in_use; 193 bool in_use;
194 u32 raw_tfr;
195 u32 raw_block;
196 struct intel_mid_dma_slave *mid_slave;
185}; 197};
186 198
187static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( 199static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
190 return container_of(chan, struct intel_mid_dma_chan, chan); 202 return container_of(chan, struct intel_mid_dma_chan, chan);
191} 203}
192 204
205enum intel_mid_dma_state {
206 RUNNING = 0,
207 SUSPENDED,
208};
193/** 209/**
194 * struct middma_device - internal representation of a DMA device 210 * struct middma_device - internal representation of a DMA device
195 * @pdev: PCI device 211 * @pdev: PCI device
@@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
205 * @max_chan: max number of chs supported (from drv_data) 221 * @max_chan: max number of chs supported (from drv_data)
206 * @block_size: Block size of DMA transfer supported (from drv_data) 222 * @block_size: Block size of DMA transfer supported (from drv_data)
207 * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) 223 * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data)
224 * @state: dma PM device state
208 */ 225 */
209struct middma_device { 226struct middma_device {
210 struct pci_dev *pdev; 227 struct pci_dev *pdev;
@@ -220,6 +237,7 @@ struct middma_device {
220 int max_chan; 237 int max_chan;
221 int block_size; 238 int block_size;
222 unsigned int pimr_mask; 239 unsigned int pimr_mask;
240 enum intel_mid_dma_state state;
223}; 241};
224 242
225static inline struct middma_device *to_middma_device(struct dma_device *common) 243static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -238,14 +256,27 @@ struct intel_mid_dma_desc {
238 u32 cfg_lo; 256 u32 cfg_lo;
239 u32 ctl_lo; 257 u32 ctl_lo;
240 u32 ctl_hi; 258 u32 ctl_hi;
259 struct pci_pool *lli_pool;
260 struct intel_mid_dma_lli *lli;
261 dma_addr_t lli_phys;
262 unsigned int lli_length;
263 unsigned int current_lli;
241 dma_addr_t next; 264 dma_addr_t next;
242 enum dma_data_direction dirn; 265 enum dma_data_direction dirn;
243 enum dma_status status; 266 enum dma_status status;
244 enum intel_mid_dma_width width; /*width of DMA txn*/ 267 enum dma_slave_buswidth width; /*width of DMA txn*/
245 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 268 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
246 269
247}; 270};
248 271
272struct intel_mid_dma_lli {
273 dma_addr_t sar;
274 dma_addr_t dar;
275 dma_addr_t llp;
276 u32 ctl_lo;
277 u32 ctl_hi;
278} __attribute__ ((packed));
279
249static inline int test_ch_en(void __iomem *dma, u32 ch_no) 280static inline int test_ch_en(void __iomem *dma, u32 ch_no)
250{ 281{
251 u32 en_reg = ioread32(dma + DMA_CHAN_EN); 282 u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
257{ 288{
258 return container_of(txd, struct intel_mid_dma_desc, txd); 289 return container_of(txd, struct intel_mid_dma_desc, txd);
259} 290}
291
292static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
293 (struct dma_slave_config *slave)
294{
295 return container_of(slave, struct intel_mid_dma_slave, dma_slave);
296}
297
298
299int dma_resume(struct pci_dev *pci);
300
260#endif /*__INTEL_MID_DMAC_REGS_H__*/ 301#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index d5fd098e22e8..3f76cd9af7c3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,11 +1,8 @@
1/* 1/*
2 * driver/dma/ste_dma40.c 2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * 3 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 * Copyright (C) ST-Ericsson 2007-2010 4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */ 6 */
10 7
11#include <linux/kernel.h> 8#include <linux/kernel.h>
@@ -14,6 +11,7 @@
14#include <linux/platform_device.h> 11#include <linux/platform_device.h>
15#include <linux/clk.h> 12#include <linux/clk.h>
16#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/err.h>
17 15
18#include <plat/ste_dma40.h> 16#include <plat/ste_dma40.h>
19 17
@@ -32,6 +30,11 @@
32 30
33/* Hardware requirement on LCLA alignment */ 31/* Hardware requirement on LCLA alignment */
34#define LCLA_ALIGNMENT 0x40000 32#define LCLA_ALIGNMENT 0x40000
33
34/* Max number of links per event group */
35#define D40_LCLA_LINK_PER_EVENT_GRP 128
36#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
37
35/* Attempts before giving up to trying to get pages that are aligned */ 38/* Attempts before giving up to trying to get pages that are aligned */
36#define MAX_LCLA_ALLOC_ATTEMPTS 256 39#define MAX_LCLA_ALLOC_ATTEMPTS 256
37 40
@@ -41,7 +44,7 @@
41#define D40_ALLOC_LOG_FREE 0 44#define D40_ALLOC_LOG_FREE 0
42 45
43/* Hardware designer of the block */ 46/* Hardware designer of the block */
44#define D40_PERIPHID2_DESIGNER 0x8 47#define D40_HW_DESIGNER 0x8
45 48
46/** 49/**
47 * enum 40_command - The different commands and/or statuses. 50 * enum 40_command - The different commands and/or statuses.
@@ -84,18 +87,17 @@ struct d40_lli_pool {
84 * @lli_log: Same as above but for logical channels. 87 * @lli_log: Same as above but for logical channels.
85 * @lli_pool: The pool with two entries pre-allocated. 88 * @lli_pool: The pool with two entries pre-allocated.
86 * @lli_len: Number of llis of current descriptor. 89 * @lli_len: Number of llis of current descriptor.
87 * @lli_count: Number of transfered llis. 90 * @lli_current: Number of transfered llis.
88 * @lli_tx_len: Max number of LLIs per transfer, there can be 91 * @lcla_alloc: Number of LCLA entries allocated.
89 * many transfer for one descriptor.
90 * @txd: DMA engine struct. Used for among other things for communication 92 * @txd: DMA engine struct. Used for among other things for communication
91 * during a transfer. 93 * during a transfer.
92 * @node: List entry. 94 * @node: List entry.
93 * @dir: The transfer direction of this job.
94 * @is_in_client_list: true if the client owns this descriptor. 95 * @is_in_client_list: true if the client owns this descriptor.
96 * @is_hw_linked: true if this job will automatically be continued for
97 * the previous one.
95 * 98 *
96 * This descriptor is used for both logical and physical transfers. 99 * This descriptor is used for both logical and physical transfers.
97 */ 100 */
98
99struct d40_desc { 101struct d40_desc {
100 /* LLI physical */ 102 /* LLI physical */
101 struct d40_phy_lli_bidir lli_phy; 103 struct d40_phy_lli_bidir lli_phy;
@@ -104,14 +106,14 @@ struct d40_desc {
104 106
105 struct d40_lli_pool lli_pool; 107 struct d40_lli_pool lli_pool;
106 int lli_len; 108 int lli_len;
107 int lli_count; 109 int lli_current;
108 u32 lli_tx_len; 110 int lcla_alloc;
109 111
110 struct dma_async_tx_descriptor txd; 112 struct dma_async_tx_descriptor txd;
111 struct list_head node; 113 struct list_head node;
112 114
113 enum dma_data_direction dir;
114 bool is_in_client_list; 115 bool is_in_client_list;
116 bool is_hw_linked;
115}; 117};
116 118
117/** 119/**
@@ -123,17 +125,14 @@ struct d40_desc {
123 * @pages: The number of pages needed for all physical channels. 125 * @pages: The number of pages needed for all physical channels.
124 * Only used later for clean-up on error 126 * Only used later for clean-up on error
125 * @lock: Lock to protect the content in this struct. 127 * @lock: Lock to protect the content in this struct.
126 * @alloc_map: Bitmap mapping between physical channel and LCLA entries. 128 * @alloc_map: big map over which LCLA entry is own by which job.
127 * @num_blocks: The number of entries of alloc_map. Equals to the
128 * number of physical channels.
129 */ 129 */
130struct d40_lcla_pool { 130struct d40_lcla_pool {
131 void *base; 131 void *base;
132 void *base_unaligned; 132 void *base_unaligned;
133 int pages; 133 int pages;
134 spinlock_t lock; 134 spinlock_t lock;
135 u32 *alloc_map; 135 struct d40_desc **alloc_map;
136 int num_blocks;
137}; 136};
138 137
139/** 138/**
@@ -146,9 +145,7 @@ struct d40_lcla_pool {
146 * this physical channel. Can also be free or physically allocated. 145 * this physical channel. Can also be free or physically allocated.
147 * @allocated_dst: Same as for src but is dst. 146 * @allocated_dst: Same as for src but is dst.
148 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as 147 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
149 * event line number. Both allocated_src and allocated_dst can not be 148 * event line number.
150 * allocated to a physical channel, since the interrupt handler has then
151 * no way of figure out which one the interrupt belongs to.
152 */ 149 */
153struct d40_phy_res { 150struct d40_phy_res {
154 spinlock_t lock; 151 spinlock_t lock;
@@ -206,7 +203,6 @@ struct d40_chan {
206 u32 src_def_cfg; 203 u32 src_def_cfg;
207 u32 dst_def_cfg; 204 u32 dst_def_cfg;
208 struct d40_def_lcsp log_def; 205 struct d40_def_lcsp log_def;
209 struct d40_lcla_elem lcla;
210 struct d40_log_lli_full *lcpa; 206 struct d40_log_lli_full *lcpa;
211 /* Runtime reconfiguration */ 207 /* Runtime reconfiguration */
212 dma_addr_t runtime_addr; 208 dma_addr_t runtime_addr;
@@ -234,7 +230,6 @@ struct d40_chan {
234 * @dma_both: dma_device channels that can do both memcpy and slave transfers. 230 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
235 * @dma_slave: dma_device channels that can do only do slave transfers. 231 * @dma_slave: dma_device channels that can do only do slave transfers.
236 * @dma_memcpy: dma_device channels that can do only do memcpy transfers. 232 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
237 * @phy_chans: Room for all possible physical channels in system.
238 * @log_chans: Room for all possible logical channels in system. 233 * @log_chans: Room for all possible logical channels in system.
239 * @lookup_log_chans: Used to map interrupt number to logical channel. Points 234 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
240 * to log_chans entries. 235 * to log_chans entries.
@@ -340,9 +335,6 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
340 align); 335 align);
341 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, 336 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
342 align); 337 align);
343
344 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
345 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
346 } 338 }
347 339
348 return 0; 340 return 0;
@@ -357,22 +349,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
357 d40d->lli_log.dst = NULL; 349 d40d->lli_log.dst = NULL;
358 d40d->lli_phy.src = NULL; 350 d40d->lli_phy.src = NULL;
359 d40d->lli_phy.dst = NULL; 351 d40d->lli_phy.dst = NULL;
360 d40d->lli_phy.src_addr = 0;
361 d40d->lli_phy.dst_addr = 0;
362} 352}
363 353
364static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, 354static int d40_lcla_alloc_one(struct d40_chan *d40c,
365 struct d40_desc *desc) 355 struct d40_desc *d40d)
356{
357 unsigned long flags;
358 int i;
359 int ret = -EINVAL;
360 int p;
361
362 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
363
364 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
365
366 /*
367 * Allocate both src and dst at the same time, therefore the half
368 * start on 1 since 0 can't be used since zero is used as end marker.
369 */
370 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
371 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
372 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
373 d40d->lcla_alloc++;
374 ret = i;
375 break;
376 }
377 }
378
379 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
380
381 return ret;
382}
383
384static int d40_lcla_free_all(struct d40_chan *d40c,
385 struct d40_desc *d40d)
366{ 386{
367 dma_cookie_t cookie = d40c->chan.cookie; 387 unsigned long flags;
388 int i;
389 int ret = -EINVAL;
390
391 if (d40c->log_num == D40_PHY_CHAN)
392 return 0;
368 393
369 if (++cookie < 0) 394 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
370 cookie = 1; 395
396 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
397 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
398 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
399 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
400 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
401 d40d->lcla_alloc--;
402 if (d40d->lcla_alloc == 0) {
403 ret = 0;
404 break;
405 }
406 }
407 }
408
409 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
371 410
372 d40c->chan.cookie = cookie; 411 return ret;
373 desc->txd.cookie = cookie;
374 412
375 return cookie;
376} 413}
377 414
378static void d40_desc_remove(struct d40_desc *d40d) 415static void d40_desc_remove(struct d40_desc *d40d)
@@ -382,28 +419,35 @@ static void d40_desc_remove(struct d40_desc *d40d)
382 419
383static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 420static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
384{ 421{
385 struct d40_desc *d; 422 struct d40_desc *desc = NULL;
386 struct d40_desc *_d;
387 423
388 if (!list_empty(&d40c->client)) { 424 if (!list_empty(&d40c->client)) {
425 struct d40_desc *d;
426 struct d40_desc *_d;
427
389 list_for_each_entry_safe(d, _d, &d40c->client, node) 428 list_for_each_entry_safe(d, _d, &d40c->client, node)
390 if (async_tx_test_ack(&d->txd)) { 429 if (async_tx_test_ack(&d->txd)) {
391 d40_pool_lli_free(d); 430 d40_pool_lli_free(d);
392 d40_desc_remove(d); 431 d40_desc_remove(d);
432 desc = d;
433 memset(desc, 0, sizeof(*desc));
393 break; 434 break;
394 } 435 }
395 } else {
396 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
397 if (d != NULL) {
398 memset(d, 0, sizeof(struct d40_desc));
399 INIT_LIST_HEAD(&d->node);
400 }
401 } 436 }
402 return d; 437
438 if (!desc)
439 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
440
441 if (desc)
442 INIT_LIST_HEAD(&desc->node);
443
444 return desc;
403} 445}
404 446
405static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 447static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
406{ 448{
449
450 d40_lcla_free_all(d40c, d40d);
407 kmem_cache_free(d40c->base->desc_slab, d40d); 451 kmem_cache_free(d40c->base->desc_slab, d40d);
408} 452}
409 453
@@ -412,6 +456,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
412 list_add_tail(&desc->node, &d40c->active); 456 list_add_tail(&desc->node, &d40c->active);
413} 457}
414 458
459static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
460{
461 int curr_lcla = -EINVAL, next_lcla;
462
463 if (d40c->log_num == D40_PHY_CHAN) {
464 d40_phy_lli_write(d40c->base->virtbase,
465 d40c->phy_chan->num,
466 d40d->lli_phy.dst,
467 d40d->lli_phy.src);
468 d40d->lli_current = d40d->lli_len;
469 } else {
470
471 if ((d40d->lli_len - d40d->lli_current) > 1)
472 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
473
474 d40_log_lli_lcpa_write(d40c->lcpa,
475 &d40d->lli_log.dst[d40d->lli_current],
476 &d40d->lli_log.src[d40d->lli_current],
477 curr_lcla);
478
479 d40d->lli_current++;
480 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
481 struct d40_log_lli *lcla;
482
483 if (d40d->lli_current + 1 < d40d->lli_len)
484 next_lcla = d40_lcla_alloc_one(d40c, d40d);
485 else
486 next_lcla = -EINVAL;
487
488 lcla = d40c->base->lcla_pool.base +
489 d40c->phy_chan->num * 1024 +
490 8 * curr_lcla * 2;
491
492 d40_log_lli_lcla_write(lcla,
493 &d40d->lli_log.dst[d40d->lli_current],
494 &d40d->lli_log.src[d40d->lli_current],
495 next_lcla);
496
497 (void) dma_map_single(d40c->base->dev, lcla,
498 2 * sizeof(struct d40_log_lli),
499 DMA_TO_DEVICE);
500
501 curr_lcla = next_lcla;
502
503 if (curr_lcla == -EINVAL) {
504 d40d->lli_current++;
505 break;
506 }
507
508 }
509 }
510}
511
415static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 512static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
416{ 513{
417 struct d40_desc *d; 514 struct d40_desc *d;
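A worked example of the LCLA bookkeeping introduced above, under the constants in this patch (128 links per event group, links handed out in src/dst pairs, entry 0 reserved as end marker) and the assumption from ste_dma40_ll.h that a d40_log_lli is 8 bytes, so each link occupies a 16-byte src/dst pair inside the 1024-byte LCLA slice that d40_desc_load() addresses per physical channel:

#include <stdio.h>

#define D40_LCLA_LINK_PER_EVENT_GRP	128	/* from this patch */
#define LOG_LLI_SIZE			8	/* assumed sizeof(struct d40_log_lli) */

int main(void)
{
	int phy_chan = 2;	/* example physical channel number */
	int curr_lcla = 5;	/* example index from d40_lcla_alloc_one() */

	/* alloc_map slice owned by this physical channel */
	int first = phy_chan * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Byte offset of the src/dst pair, as in d40_desc_load():
	 * base + num * 1024 + 8 * curr_lcla * 2
	 */
	unsigned int offset = phy_chan * 1024 + LOG_LLI_SIZE * curr_lcla * 2;

	printf("channel %d owns alloc_map[%d..%d]; the allocator hands out 1..%d\n",
	       phy_chan, first, first + D40_LCLA_LINK_PER_EVENT_GRP - 1,
	       D40_LCLA_LINK_PER_EVENT_GRP / 2 - 1);
	printf("link %d for channel %d lives at LCLA byte offset %u\n",
	       curr_lcla, phy_chan, offset);
	return 0;
}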
@@ -443,68 +540,26 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
443 return d; 540 return d;
444} 541}
445 542
446/* Support functions for logical channels */ 543static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
447
448static int d40_lcla_id_get(struct d40_chan *d40c)
449{ 544{
450 int src_id = 0; 545 struct d40_desc *d;
451 int dst_id = 0;
452 struct d40_log_lli *lcla_lidx_base =
453 d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
454 int i;
455 int lli_per_log = d40c->base->plat_data->llis_per_log;
456 unsigned long flags;
457
458 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
459 return 0;
460
461 if (d40c->base->lcla_pool.num_blocks > 32)
462 return -EINVAL;
463
464 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
465
466 for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
467 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
468 (0x1 << i))) {
469 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
470 (0x1 << i);
471 break;
472 }
473 }
474 src_id = i;
475 if (src_id >= d40c->base->lcla_pool.num_blocks)
476 goto err;
477 546
478 for (; i < d40c->base->lcla_pool.num_blocks; i++) { 547 if (list_empty(&d40c->queue))
479 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & 548 return NULL;
480 (0x1 << i))) { 549 list_for_each_entry(d, &d40c->queue, node)
481 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= 550 if (list_is_last(&d->node, &d40c->queue))
482 (0x1 << i);
483 break; 551 break;
484 } 552 return d;
485 }
486
487 dst_id = i;
488 if (dst_id == src_id)
489 goto err;
490
491 d40c->lcla.src_id = src_id;
492 d40c->lcla.dst_id = dst_id;
493 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
494 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
495
496 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
497 return 0;
498err:
499 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
500 return -EINVAL;
501} 553}
502 554
555/* Support functions for logical channels */
556
503 557
504static int d40_channel_execute_command(struct d40_chan *d40c, 558static int d40_channel_execute_command(struct d40_chan *d40c,
505 enum d40_command command) 559 enum d40_command command)
506{ 560{
507 int status, i; 561 u32 status;
562 int i;
508 void __iomem *active_reg; 563 void __iomem *active_reg;
509 int ret = 0; 564 int ret = 0;
510 unsigned long flags; 565 unsigned long flags;
@@ -567,35 +622,19 @@ done:
567static void d40_term_all(struct d40_chan *d40c) 622static void d40_term_all(struct d40_chan *d40c)
568{ 623{
569 struct d40_desc *d40d; 624 struct d40_desc *d40d;
570 unsigned long flags;
571 625
572 /* Release active descriptors */ 626 /* Release active descriptors */
573 while ((d40d = d40_first_active_get(d40c))) { 627 while ((d40d = d40_first_active_get(d40c))) {
574 d40_desc_remove(d40d); 628 d40_desc_remove(d40d);
575
576 /* Return desc to free-list */
577 d40_desc_free(d40c, d40d); 629 d40_desc_free(d40c, d40d);
578 } 630 }
579 631
580 /* Release queued descriptors waiting for transfer */ 632 /* Release queued descriptors waiting for transfer */
581 while ((d40d = d40_first_queued(d40c))) { 633 while ((d40d = d40_first_queued(d40c))) {
582 d40_desc_remove(d40d); 634 d40_desc_remove(d40d);
583
584 /* Return desc to free-list */
585 d40_desc_free(d40c, d40d); 635 d40_desc_free(d40c, d40d);
586 } 636 }
587 637
588 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
589
590 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
591 (~(0x1 << d40c->lcla.dst_id));
592 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
593 (~(0x1 << d40c->lcla.src_id));
594
595 d40c->lcla.src_id = -1;
596 d40c->lcla.dst_id = -1;
597
598 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
599 638
600 d40c->pending_tx = 0; 639 d40c->pending_tx = 0;
601 d40c->busy = false; 640 d40c->busy = false;
@@ -640,45 +679,22 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
640 679
641static u32 d40_chan_has_events(struct d40_chan *d40c) 680static u32 d40_chan_has_events(struct d40_chan *d40c)
642{ 681{
643 u32 val = 0; 682 u32 val;
644 683
645 /* If SSLNK or SDLNK is zero all events are disabled */ 684 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
646 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 685 d40c->phy_chan->num * D40_DREG_PCDELTA +
647 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 686 D40_CHAN_REG_SSLNK);
648 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
649 d40c->phy_chan->num * D40_DREG_PCDELTA +
650 D40_CHAN_REG_SSLNK);
651
652 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
653 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
654 d40c->phy_chan->num * D40_DREG_PCDELTA +
655 D40_CHAN_REG_SDLNK);
656 return val;
657}
658 687
659static void d40_config_enable_lidx(struct d40_chan *d40c) 688 val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
660{ 689 d40c->phy_chan->num * D40_DREG_PCDELTA +
661 /* Set LIDX for lcla */ 690 D40_CHAN_REG_SDLNK);
662 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 691 return val;
663 D40_SREG_ELEM_LOG_LIDX_MASK,
664 d40c->base->virtbase + D40_DREG_PCBASE +
665 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
666
667 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
668 D40_SREG_ELEM_LOG_LIDX_MASK,
669 d40c->base->virtbase + D40_DREG_PCBASE +
670 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
671} 692}
672 693
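The rewritten d40_chan_has_events() simply ORs the SSLNK and SDLNK link registers, both addressed with the recurring PCBASE + num * PCDELTA + reg pattern that every per-channel access in this driver uses. A hypothetical helper illustrating that layout (the numeric offsets below are placeholders, not the real register map):

/* Sketch of the per-physical-channel register addressing used throughout
 * the driver; the offset values here are placeholders for illustration. */
#include <stdio.h>
#include <stdint.h>

#define D40_DREG_PCBASE		0x400u	/* placeholder */
#define D40_DREG_PCDELTA	0x200u	/* placeholder */
#define D40_CHAN_REG_SSLNK	0x28u	/* placeholder */
#define D40_CHAN_REG_SDLNK	0x68u	/* placeholder */

static uint32_t chan_reg(int chan, uint32_t reg)
{
	return D40_DREG_PCBASE + (uint32_t)chan * D40_DREG_PCDELTA + reg;
}

int main(void)
{
	int chan = 2;

	printf("SSLNK at 0x%x, SDLNK at 0x%x\n",
	       (unsigned)chan_reg(chan, D40_CHAN_REG_SSLNK),
	       (unsigned)chan_reg(chan, D40_CHAN_REG_SDLNK));
	return 0;
}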
673static int d40_config_write(struct d40_chan *d40c) 694static void d40_config_write(struct d40_chan *d40c)
674{ 695{
675 u32 addr_base; 696 u32 addr_base;
676 u32 var; 697 u32 var;
677 int res;
678
679 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
680 if (res)
681 return res;
682 698
683 /* Odd addresses are even addresses + 4 */ 699 /* Odd addresses are even addresses + 4 */
684 addr_base = (d40c->phy_chan->num % 2) * 4; 700 addr_base = (d40c->phy_chan->num % 2) * 4;
@@ -704,41 +720,181 @@ static int d40_config_write(struct d40_chan *d40c)
704 d40c->phy_chan->num * D40_DREG_PCDELTA + 720 d40c->phy_chan->num * D40_DREG_PCDELTA +
705 D40_CHAN_REG_SDCFG); 721 D40_CHAN_REG_SDCFG);
706 722
707 d40_config_enable_lidx(d40c); 723 /* Set LIDX for lcla */
724 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
725 D40_SREG_ELEM_LOG_LIDX_MASK,
726 d40c->base->virtbase + D40_DREG_PCBASE +
727 d40c->phy_chan->num * D40_DREG_PCDELTA +
728 D40_CHAN_REG_SDELT);
729
730 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
731 D40_SREG_ELEM_LOG_LIDX_MASK,
732 d40c->base->virtbase + D40_DREG_PCBASE +
733 d40c->phy_chan->num * D40_DREG_PCDELTA +
734 D40_CHAN_REG_SSELT);
735
708 } 736 }
737}
738
739static u32 d40_residue(struct d40_chan *d40c)
740{
741 u32 num_elt;
742
743 if (d40c->log_num != D40_PHY_CHAN)
744 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
745 >> D40_MEM_LCSP2_ECNT_POS;
746 else
747 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
748 d40c->phy_chan->num * D40_DREG_PCDELTA +
749 D40_CHAN_REG_SDELT) &
750 D40_SREG_ELEM_PHY_ECNT_MASK) >>
751 D40_SREG_ELEM_PHY_ECNT_POS;
752 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
753}
754
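d40_residue() turns the remaining element count (ECNT) into bytes by multiplying with 1 << data_width, i.e. it assumes data_width encodes log2 of the element size in bytes. A worked example of that conversion:

/* Residue arithmetic as in d40_residue(): bytes = ECNT * (1 << data_width),
 * assuming data_width is log2 of the element size in bytes. */
#include <stdio.h>

int main(void)
{
	unsigned int num_elt = 10;	/* elements left, as read from ECNT */
	unsigned int data_width = 2;	/* 2 -> 4-byte elements */

	printf("bytes left = %u\n", num_elt * (1u << data_width));	/* 40 */
	return 0;
}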
755static bool d40_tx_is_linked(struct d40_chan *d40c)
756{
757 bool is_link;
758
759 if (d40c->log_num != D40_PHY_CHAN)
760 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
761 else
762 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
763 d40c->phy_chan->num * D40_DREG_PCDELTA +
764 D40_CHAN_REG_SDLNK) &
765 D40_SREG_LNK_PHYS_LNK_MASK;
766 return is_link;
767}
768
769static int d40_pause(struct dma_chan *chan)
770{
771 struct d40_chan *d40c =
772 container_of(chan, struct d40_chan, chan);
773 int res = 0;
774 unsigned long flags;
775
776 if (!d40c->busy)
777 return 0;
778
779 spin_lock_irqsave(&d40c->lock, flags);
780
781 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
782 if (res == 0) {
783 if (d40c->log_num != D40_PHY_CHAN) {
784 d40_config_set_event(d40c, false);
785 /* Resume the other logical channels if any */
786 if (d40_chan_has_events(d40c))
787 res = d40_channel_execute_command(d40c,
788 D40_DMA_RUN);
789 }
790 }
791
792 spin_unlock_irqrestore(&d40c->lock, flags);
709 return res; 793 return res;
710} 794}
711 795
712static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 796static int d40_resume(struct dma_chan *chan)
713{ 797{
714 if (d40d->lli_phy.dst && d40d->lli_phy.src) { 798 struct d40_chan *d40c =
715 d40_phy_lli_write(d40c->base->virtbase, 799 container_of(chan, struct d40_chan, chan);
716 d40c->phy_chan->num, 800 int res = 0;
717 d40d->lli_phy.dst, 801 unsigned long flags;
718 d40d->lli_phy.src); 802
719 } else if (d40d->lli_log.dst && d40d->lli_log.src) { 803 if (!d40c->busy)
720 struct d40_log_lli *src = d40d->lli_log.src; 804 return 0;
721 struct d40_log_lli *dst = d40d->lli_log.dst; 805
722 int s; 806 spin_lock_irqsave(&d40c->lock, flags);
723 807
724 src += d40d->lli_count; 808 if (d40c->base->rev == 0)
725 dst += d40d->lli_count; 809 if (d40c->log_num != D40_PHY_CHAN) {
726 s = d40_log_lli_write(d40c->lcpa, 810 res = d40_channel_execute_command(d40c,
727 d40c->lcla.src, d40c->lcla.dst, 811 D40_DMA_SUSPEND_REQ);
728 dst, src, 812 goto no_suspend;
729 d40c->base->plat_data->llis_per_log);
730
731 /* If s equals to zero, the job is not linked */
732 if (s > 0) {
733 (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
734 s * sizeof(struct d40_log_lli),
735 DMA_TO_DEVICE);
736 (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
737 s * sizeof(struct d40_log_lli),
738 DMA_TO_DEVICE);
739 } 813 }
814
815 /* If bytes left to transfer or linked tx resume job */
816 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
817
818 if (d40c->log_num != D40_PHY_CHAN)
819 d40_config_set_event(d40c, true);
820
821 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
822 }
823
824no_suspend:
825 spin_unlock_irqrestore(&d40c->lock, flags);
826 return res;
827}
828
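Taken together, d40_pause() and d40_resume() treat logical channels specially: pausing clears only this channel's event line and keeps the physical channel running when other logical channels still have events set, while resuming restarts the channel only if there is residue or a linked transfer left. A reduced decision model of that logic (pure C, no register access; the arguments stand in for the register reads):

/* Decision model for the pause/resume paths above; the inputs stand in
 * for d40_chan_has_events(), d40_residue() and d40_tx_is_linked(). */
#include <stdio.h>
#include <stdbool.h>

static bool pause_keeps_phy_running(unsigned int events_after_clear)
{
	/* Other logical channels on the same physical channel still active? */
	return events_after_clear != 0;
}

static bool resume_restarts_channel(unsigned int residue, bool tx_is_linked)
{
	/* Only restart when there is actually work left. */
	return residue != 0 || tx_is_linked;
}

int main(void)
{
	printf("keep phy running: %d\n", pause_keeps_phy_running(0x4));
	printf("restart on resume: %d\n", resume_restarts_channel(0, true));
	return 0;
}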
829static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
830{
831 /* TODO: Write */
832}
833
834static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
835{
836 struct d40_desc *d40d_prev = NULL;
837 int i;
838 u32 val;
839
840 if (!list_empty(&d40c->queue))
841 d40d_prev = d40_last_queued(d40c);
842 else if (!list_empty(&d40c->active))
843 d40d_prev = d40_first_active_get(d40c);
844
845 if (!d40d_prev)
846 return;
847
848 /* Here we try to join this job with previous jobs */
849 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
850 d40c->phy_chan->num * D40_DREG_PCDELTA +
851 D40_CHAN_REG_SSLNK);
852
853 /* Figure out which link we're currently transmitting */
854 for (i = 0; i < d40d_prev->lli_len; i++)
855 if (val == d40d_prev->lli_phy.src[i].reg_lnk)
856 break;
857
858 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
859 d40c->phy_chan->num * D40_DREG_PCDELTA +
860 D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
861
862 if (i == (d40d_prev->lli_len - 1) && val > 0) {
863 /* Change the current one */
864 writel(virt_to_phys(d40d->lli_phy.src),
865 d40c->base->virtbase + D40_DREG_PCBASE +
866 d40c->phy_chan->num * D40_DREG_PCDELTA +
867 D40_CHAN_REG_SSLNK);
868 writel(virt_to_phys(d40d->lli_phy.dst),
869 d40c->base->virtbase + D40_DREG_PCBASE +
870 d40c->phy_chan->num * D40_DREG_PCDELTA +
871 D40_CHAN_REG_SDLNK);
872
873 d40d->is_hw_linked = true;
874
875 } else if (i < d40d_prev->lli_len) {
876 (void) dma_unmap_single(d40c->base->dev,
877 virt_to_phys(d40d_prev->lli_phy.src),
878 d40d_prev->lli_pool.size,
879 DMA_TO_DEVICE);
880
881 /* Keep the settings */
882 val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
883 ~D40_SREG_LNK_PHYS_LNK_MASK;
884 d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
885 val | virt_to_phys(d40d->lli_phy.src);
886
887 val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
888 ~D40_SREG_LNK_PHYS_LNK_MASK;
889 d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
890 val | virt_to_phys(d40d->lli_phy.dst);
891
892 (void) dma_map_single(d40c->base->dev,
893 d40d_prev->lli_phy.src,
894 d40d_prev->lli_pool.size,
895 DMA_TO_DEVICE);
896 d40d->is_hw_linked = true;
740 } 897 }
741 d40d->lli_count += d40d->lli_tx_len;
742} 898}
743 899
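d40_tx_submit_phy() chains a new physical job behind the previous one by patching the link field of the previous job's last LLI: the configuration bits outside the link mask are kept and the link bits are replaced with the physical address of the new job's first LLI. The bit manipulation in isolation (the mask value below is an assumed placeholder and the addresses are made up):

/* Model of the reg_lnk patching in d40_tx_submit_phy(): keep everything
 * outside the link mask, replace the link bits with the new address.
 * PHYS_LNK_MASK is a placeholder, not the real D40_SREG_LNK_PHYS_LNK_MASK. */
#include <stdio.h>
#include <stdint.h>

#define PHYS_LNK_MASK	0xfffffff8u	/* assumed link-address field */

static uint32_t relink(uint32_t prev_reg_lnk, uint32_t next_lli_phys)
{
	return (prev_reg_lnk & ~PHYS_LNK_MASK) | next_lli_phys;
}

int main(void)
{
	uint32_t prev = 0x00000005u;	/* old link plus kept low bits */
	uint32_t next = 0x20001000u;	/* phys addr of the new job's first LLI */

	printf("patched reg_lnk = 0x%08x\n", (unsigned)relink(prev, next));
	return 0;
}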
744static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 900static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -749,14 +905,28 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
749 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 905 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
750 unsigned long flags; 906 unsigned long flags;
751 907
908 (void) d40_pause(&d40c->chan);
909
752 spin_lock_irqsave(&d40c->lock, flags); 910 spin_lock_irqsave(&d40c->lock, flags);
753 911
754 tx->cookie = d40_assign_cookie(d40c, d40d); 912 d40c->chan.cookie++;
913
914 if (d40c->chan.cookie < 0)
915 d40c->chan.cookie = 1;
916
917 d40d->txd.cookie = d40c->chan.cookie;
918
919 if (d40c->log_num == D40_PHY_CHAN)
920 d40_tx_submit_phy(d40c, d40d);
921 else
922 d40_tx_submit_log(d40c, d40d);
755 923
756 d40_desc_queue(d40c, d40d); 924 d40_desc_queue(d40c, d40d);
757 925
758 spin_unlock_irqrestore(&d40c->lock, flags); 926 spin_unlock_irqrestore(&d40c->lock, flags);
759 927
928 (void) d40_resume(&d40c->chan);
929
760 return tx->cookie; 930 return tx->cookie;
761} 931}
762 932
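d40_tx_submit() now assigns the cookie inline: the channel cookie is incremented and reset to 1 whenever it would turn negative, keeping zero and negative values free for status codes. Modelled standalone (the wrap mirrors the driver; in strict ISO C the signed overflow itself would be undefined, the kernel builds with wrapping semantics):

/* Model of the inline cookie assignment in d40_tx_submit(): bump the
 * channel cookie and wrap back to 1 when it goes negative, so values
 * <= 0 never become valid cookies. */
#include <stdio.h>

typedef int dma_cookie_t;

static dma_cookie_t assign_cookie(dma_cookie_t *chan_cookie)
{
	(*chan_cookie)++;
	if (*chan_cookie < 0)
		*chan_cookie = 1;
	return *chan_cookie;
}

int main(void)
{
	dma_cookie_t chan_cookie = 41;

	printf("descriptor cookie = %d\n", assign_cookie(&chan_cookie)); /* 42 */
	return 0;
}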
@@ -796,14 +966,21 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
796 /* Add to active queue */ 966 /* Add to active queue */
797 d40_desc_submit(d40c, d40d); 967 d40_desc_submit(d40c, d40d);
798 968
799 /* Initiate DMA job */ 969 /*
800 d40_desc_load(d40c, d40d); 970 * If this job is already linked in hw,
971 * do not submit it.
972 */
801 973
802 /* Start dma job */ 974 if (!d40d->is_hw_linked) {
803 err = d40_start(d40c); 975 /* Initiate DMA job */
976 d40_desc_load(d40c, d40d);
804 977
805 if (err) 978 /* Start dma job */
806 return NULL; 979 err = d40_start(d40c);
980
981 if (err)
982 return NULL;
983 }
807 } 984 }
808 985
809 return d40d; 986 return d40d;
@@ -814,17 +991,15 @@ static void dma_tc_handle(struct d40_chan *d40c)
814{ 991{
815 struct d40_desc *d40d; 992 struct d40_desc *d40d;
816 993
817 if (!d40c->phy_chan)
818 return;
819
820 /* Get first active entry from list */ 994 /* Get first active entry from list */
821 d40d = d40_first_active_get(d40c); 995 d40d = d40_first_active_get(d40c);
822 996
823 if (d40d == NULL) 997 if (d40d == NULL)
824 return; 998 return;
825 999
826 if (d40d->lli_count < d40d->lli_len) { 1000 d40_lcla_free_all(d40c, d40d);
827 1001
1002 if (d40d->lli_current < d40d->lli_len) {
828 d40_desc_load(d40c, d40d); 1003 d40_desc_load(d40c, d40d);
829 /* Start dma job */ 1004 /* Start dma job */
830 (void) d40_start(d40c); 1005 (void) d40_start(d40c);
@@ -842,7 +1017,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
842static void dma_tasklet(unsigned long data) 1017static void dma_tasklet(unsigned long data)
843{ 1018{
844 struct d40_chan *d40c = (struct d40_chan *) data; 1019 struct d40_chan *d40c = (struct d40_chan *) data;
845 struct d40_desc *d40d_fin; 1020 struct d40_desc *d40d;
846 unsigned long flags; 1021 unsigned long flags;
847 dma_async_tx_callback callback; 1022 dma_async_tx_callback callback;
848 void *callback_param; 1023 void *callback_param;
@@ -850,12 +1025,12 @@ static void dma_tasklet(unsigned long data)
850 spin_lock_irqsave(&d40c->lock, flags); 1025 spin_lock_irqsave(&d40c->lock, flags);
851 1026
852 /* Get first active entry from list */ 1027 /* Get first active entry from list */
853 d40d_fin = d40_first_active_get(d40c); 1028 d40d = d40_first_active_get(d40c);
854 1029
855 if (d40d_fin == NULL) 1030 if (d40d == NULL)
856 goto err; 1031 goto err;
857 1032
858 d40c->completed = d40d_fin->txd.cookie; 1033 d40c->completed = d40d->txd.cookie;
859 1034
860 /* 1035 /*
861 * If terminating a channel pending_tx is set to zero. 1036 * If terminating a channel pending_tx is set to zero.
@@ -867,19 +1042,19 @@ static void dma_tasklet(unsigned long data)
867 } 1042 }
868 1043
869 /* Callback to client */ 1044 /* Callback to client */
870 callback = d40d_fin->txd.callback; 1045 callback = d40d->txd.callback;
871 callback_param = d40d_fin->txd.callback_param; 1046 callback_param = d40d->txd.callback_param;
872 1047
873 if (async_tx_test_ack(&d40d_fin->txd)) { 1048 if (async_tx_test_ack(&d40d->txd)) {
874 d40_pool_lli_free(d40d_fin); 1049 d40_pool_lli_free(d40d);
875 d40_desc_remove(d40d_fin); 1050 d40_desc_remove(d40d);
876 /* Return desc to free-list */ 1051 d40_desc_free(d40c, d40d);
877 d40_desc_free(d40c, d40d_fin);
878 } else { 1052 } else {
879 if (!d40d_fin->is_in_client_list) { 1053 if (!d40d->is_in_client_list) {
880 d40_desc_remove(d40d_fin); 1054 d40_desc_remove(d40d);
881 list_add_tail(&d40d_fin->node, &d40c->client); 1055 d40_lcla_free_all(d40c, d40d);
882 d40d_fin->is_in_client_list = true; 1056 list_add_tail(&d40d->node, &d40c->client);
1057 d40d->is_in_client_list = true;
883 } 1058 }
884 } 1059 }
885 1060
@@ -890,7 +1065,7 @@ static void dma_tasklet(unsigned long data)
890 1065
891 spin_unlock_irqrestore(&d40c->lock, flags); 1066 spin_unlock_irqrestore(&d40c->lock, flags);
892 1067
893 if (callback) 1068 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
894 callback(callback_param); 1069 callback(callback_param);
895 1070
896 return; 1071 return;
@@ -919,7 +1094,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
919 1094
920 int i; 1095 int i;
921 u32 regs[ARRAY_SIZE(il)]; 1096 u32 regs[ARRAY_SIZE(il)];
922 u32 tmp;
923 u32 idx; 1097 u32 idx;
924 u32 row; 1098 u32 row;
925 long chan = -1; 1099 long chan = -1;
@@ -946,9 +1120,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
946 idx = chan & (BITS_PER_LONG - 1); 1120 idx = chan & (BITS_PER_LONG - 1);
947 1121
948 /* ACK interrupt */ 1122 /* ACK interrupt */
949 tmp = readl(base->virtbase + il[row].clr); 1123 writel(1 << idx, base->virtbase + il[row].clr);
950 tmp |= 1 << idx;
951 writel(tmp, base->virtbase + il[row].clr);
952 1124
953 if (il[row].offset == D40_PHY_CHAN) 1125 if (il[row].offset == D40_PHY_CHAN)
954 d40c = base->lookup_phy_chans[idx]; 1126 d40c = base->lookup_phy_chans[idx];
@@ -971,7 +1143,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
971 return IRQ_HANDLED; 1143 return IRQ_HANDLED;
972} 1144}
973 1145
974
975static int d40_validate_conf(struct d40_chan *d40c, 1146static int d40_validate_conf(struct d40_chan *d40c,
976 struct stedma40_chan_cfg *conf) 1147 struct stedma40_chan_cfg *conf)
977{ 1148{
@@ -981,14 +1152,39 @@ static int d40_validate_conf(struct d40_chan *d40c,
981 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 1152 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
982 == STEDMA40_CHANNEL_IN_LOG_MODE; 1153 == STEDMA40_CHANNEL_IN_LOG_MODE;
983 1154
984 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && 1155 if (!conf->dir) {
1156 dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
1157 __func__);
1158 res = -EINVAL;
1159 }
1160
1161 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1162 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1163 d40c->runtime_addr == 0) {
1164
1165 dev_err(&d40c->chan.dev->device,
1166 "[%s] Invalid TX channel address (%d)\n",
1167 __func__, conf->dst_dev_type);
1168 res = -EINVAL;
1169 }
1170
1171 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1172 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1173 d40c->runtime_addr == 0) {
1174 dev_err(&d40c->chan.dev->device,
1175 "[%s] Invalid RX channel address (%d)\n",
1176 __func__, conf->src_dev_type);
1177 res = -EINVAL;
1178 }
1179
1180 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
985 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1181 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
986 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 1182 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
987 __func__); 1183 __func__);
988 res = -EINVAL; 1184 res = -EINVAL;
989 } 1185 }
990 1186
991 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && 1187 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
992 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1188 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
993 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 1189 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
994 __func__); 1190 __func__);
@@ -1082,7 +1278,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1082 1278
1083 spin_lock_irqsave(&phy->lock, flags); 1279 spin_lock_irqsave(&phy->lock, flags);
1084 if (!log_event_line) { 1280 if (!log_event_line) {
1085 /* Physical interrupts are masked per physical full channel */
1086 phy->allocated_dst = D40_ALLOC_FREE; 1281 phy->allocated_dst = D40_ALLOC_FREE;
1087 phy->allocated_src = D40_ALLOC_FREE; 1282 phy->allocated_src = D40_ALLOC_FREE;
1088 is_free = true; 1283 is_free = true;
@@ -1251,7 +1446,6 @@ static int d40_free_dma(struct d40_chan *d40c)
1251 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1446 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1252 d40_pool_lli_free(d); 1447 d40_pool_lli_free(d);
1253 d40_desc_remove(d); 1448 d40_desc_remove(d);
1254 /* Return desc to free-list */
1255 d40_desc_free(d40c, d); 1449 d40_desc_free(d40c, d);
1256 } 1450 }
1257 1451
@@ -1331,30 +1525,6 @@ static int d40_free_dma(struct d40_chan *d40c)
1331 return 0; 1525 return 0;
1332} 1526}
1333 1527
1334static int d40_pause(struct dma_chan *chan)
1335{
1336 struct d40_chan *d40c =
1337 container_of(chan, struct d40_chan, chan);
1338 int res;
1339 unsigned long flags;
1340
1341 spin_lock_irqsave(&d40c->lock, flags);
1342
1343 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1344 if (res == 0) {
1345 if (d40c->log_num != D40_PHY_CHAN) {
1346 d40_config_set_event(d40c, false);
1347 /* Resume the other logical channels if any */
1348 if (d40_chan_has_events(d40c))
1349 res = d40_channel_execute_command(d40c,
1350 D40_DMA_RUN);
1351 }
1352 }
1353
1354 spin_unlock_irqrestore(&d40c->lock, flags);
1355 return res;
1356}
1357
1358static bool d40_is_paused(struct d40_chan *d40c) 1528static bool d40_is_paused(struct d40_chan *d40c)
1359{ 1529{
1360 bool is_paused = false; 1530 bool is_paused = false;
@@ -1381,16 +1551,22 @@ static bool d40_is_paused(struct d40_chan *d40c)
1381 } 1551 }
1382 1552
1383 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1553 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1384 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) 1554 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1385 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1555 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1386 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1556 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1557 d40c->phy_chan->num * D40_DREG_PCDELTA +
1558 D40_CHAN_REG_SDLNK);
1559 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1387 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1560 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1388 else { 1561 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1562 d40c->phy_chan->num * D40_DREG_PCDELTA +
1563 D40_CHAN_REG_SSLNK);
1564 } else {
1389 dev_err(&d40c->chan.dev->device, 1565 dev_err(&d40c->chan.dev->device,
1390 "[%s] Unknown direction\n", __func__); 1566 "[%s] Unknown direction\n", __func__);
1391 goto _exit; 1567 goto _exit;
1392 } 1568 }
1393 status = d40_chan_has_events(d40c); 1569
1394 status = (status & D40_EVENTLINE_MASK(event)) >> 1570 status = (status & D40_EVENTLINE_MASK(event)) >>
1395 D40_EVENTLINE_POS(event); 1571 D40_EVENTLINE_POS(event);
1396 1572
@@ -1403,64 +1579,6 @@ _exit:
1403} 1579}
1404 1580
1405 1581
1406static bool d40_tx_is_linked(struct d40_chan *d40c)
1407{
1408 bool is_link;
1409
1410 if (d40c->log_num != D40_PHY_CHAN)
1411 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1412 else
1413 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1414 d40c->phy_chan->num * D40_DREG_PCDELTA +
1415 D40_CHAN_REG_SDLNK) &
1416 D40_SREG_LNK_PHYS_LNK_MASK;
1417 return is_link;
1418}
1419
1420static u32 d40_residue(struct d40_chan *d40c)
1421{
1422 u32 num_elt;
1423
1424 if (d40c->log_num != D40_PHY_CHAN)
1425 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1426 >> D40_MEM_LCSP2_ECNT_POS;
1427 else
1428 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1429 d40c->phy_chan->num * D40_DREG_PCDELTA +
1430 D40_CHAN_REG_SDELT) &
1431 D40_SREG_ELEM_PHY_ECNT_MASK) >>
1432 D40_SREG_ELEM_PHY_ECNT_POS;
1433 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1434}
1435
1436static int d40_resume(struct dma_chan *chan)
1437{
1438 struct d40_chan *d40c =
1439 container_of(chan, struct d40_chan, chan);
1440 int res = 0;
1441 unsigned long flags;
1442
1443 spin_lock_irqsave(&d40c->lock, flags);
1444
1445 if (d40c->base->rev == 0)
1446 if (d40c->log_num != D40_PHY_CHAN) {
1447 res = d40_channel_execute_command(d40c,
1448 D40_DMA_SUSPEND_REQ);
1449 goto no_suspend;
1450 }
1451
1452 /* If bytes left to transfer or linked tx resume job */
1453 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1454 if (d40c->log_num != D40_PHY_CHAN)
1455 d40_config_set_event(d40c, true);
1456 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1457 }
1458
1459no_suspend:
1460 spin_unlock_irqrestore(&d40c->lock, flags);
1461 return res;
1462}
1463
1464static u32 stedma40_residue(struct dma_chan *chan) 1582static u32 stedma40_residue(struct dma_chan *chan)
1465{ 1583{
1466 struct d40_chan *d40c = 1584 struct d40_chan *d40c =
@@ -1475,51 +1593,6 @@ static u32 stedma40_residue(struct dma_chan *chan)
1475 return bytes_left; 1593 return bytes_left;
1476} 1594}
1477 1595
1478/* Public DMA functions in addition to the DMA engine framework */
1479
1480int stedma40_set_psize(struct dma_chan *chan,
1481 int src_psize,
1482 int dst_psize)
1483{
1484 struct d40_chan *d40c =
1485 container_of(chan, struct d40_chan, chan);
1486 unsigned long flags;
1487
1488 spin_lock_irqsave(&d40c->lock, flags);
1489
1490 if (d40c->log_num != D40_PHY_CHAN) {
1491 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1492 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1493 d40c->log_def.lcsp1 |= src_psize <<
1494 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1495 d40c->log_def.lcsp3 |= dst_psize <<
1496 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1497 goto out;
1498 }
1499
1500 if (src_psize == STEDMA40_PSIZE_PHY_1)
1501 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1502 else {
1503 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1504 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1505 D40_SREG_CFG_PSIZE_POS);
1506 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1507 }
1508
1509 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1510 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1511 else {
1512 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1513 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1514 D40_SREG_CFG_PSIZE_POS);
1515 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1516 }
1517out:
1518 spin_unlock_irqrestore(&d40c->lock, flags);
1519 return 0;
1520}
1521EXPORT_SYMBOL(stedma40_set_psize);
1522
1523struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1596struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1524 struct scatterlist *sgl_dst, 1597 struct scatterlist *sgl_dst,
1525 struct scatterlist *sgl_src, 1598 struct scatterlist *sgl_src,
@@ -1545,21 +1618,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1545 goto err; 1618 goto err;
1546 1619
1547 d40d->lli_len = sgl_len; 1620 d40d->lli_len = sgl_len;
1548 d40d->lli_tx_len = d40d->lli_len; 1621 d40d->lli_current = 0;
1549 d40d->txd.flags = dma_flags; 1622 d40d->txd.flags = dma_flags;
1550 1623
1551 if (d40c->log_num != D40_PHY_CHAN) { 1624 if (d40c->log_num != D40_PHY_CHAN) {
1552 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1553 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1554
1555 if (sgl_len > 1)
1556 /*
1557 * Check if there is space available in lcla. If not,
1558 * split list into 1-length and run only in lcpa
1559 * space.
1560 */
1561 if (d40_lcla_id_get(d40c) != 0)
1562 d40d->lli_tx_len = 1;
1563 1625
1564 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1626 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1565 dev_err(&d40c->chan.dev->device, 1627 dev_err(&d40c->chan.dev->device,
@@ -1567,27 +1629,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1567 goto err; 1629 goto err;
1568 } 1630 }
1569 1631
1570 (void) d40_log_sg_to_lli(d40c->lcla.src_id, 1632 (void) d40_log_sg_to_lli(sgl_src,
1571 sgl_src,
1572 sgl_len, 1633 sgl_len,
1573 d40d->lli_log.src, 1634 d40d->lli_log.src,
1574 d40c->log_def.lcsp1, 1635 d40c->log_def.lcsp1,
1575 d40c->dma_cfg.src_info.data_width, 1636 d40c->dma_cfg.src_info.data_width);
1576 dma_flags & DMA_PREP_INTERRUPT,
1577 d40d->lli_tx_len,
1578 d40c->base->plat_data->llis_per_log);
1579 1637
1580 (void) d40_log_sg_to_lli(d40c->lcla.dst_id, 1638 (void) d40_log_sg_to_lli(sgl_dst,
1581 sgl_dst,
1582 sgl_len, 1639 sgl_len,
1583 d40d->lli_log.dst, 1640 d40d->lli_log.dst,
1584 d40c->log_def.lcsp3, 1641 d40c->log_def.lcsp3,
1585 d40c->dma_cfg.dst_info.data_width, 1642 d40c->dma_cfg.dst_info.data_width);
1586 dma_flags & DMA_PREP_INTERRUPT,
1587 d40d->lli_tx_len,
1588 d40c->base->plat_data->llis_per_log);
1589
1590
1591 } else { 1643 } else {
1592 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1644 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1593 dev_err(&d40c->chan.dev->device, 1645 dev_err(&d40c->chan.dev->device,
@@ -1599,11 +1651,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1599 sgl_len, 1651 sgl_len,
1600 0, 1652 0,
1601 d40d->lli_phy.src, 1653 d40d->lli_phy.src,
1602 d40d->lli_phy.src_addr, 1654 virt_to_phys(d40d->lli_phy.src),
1603 d40c->src_def_cfg, 1655 d40c->src_def_cfg,
1604 d40c->dma_cfg.src_info.data_width, 1656 d40c->dma_cfg.src_info.data_width,
1605 d40c->dma_cfg.src_info.psize, 1657 d40c->dma_cfg.src_info.psize);
1606 true);
1607 1658
1608 if (res < 0) 1659 if (res < 0)
1609 goto err; 1660 goto err;
@@ -1612,11 +1663,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1612 sgl_len, 1663 sgl_len,
1613 0, 1664 0,
1614 d40d->lli_phy.dst, 1665 d40d->lli_phy.dst,
1615 d40d->lli_phy.dst_addr, 1666 virt_to_phys(d40d->lli_phy.dst),
1616 d40c->dst_def_cfg, 1667 d40c->dst_def_cfg,
1617 d40c->dma_cfg.dst_info.data_width, 1668 d40c->dma_cfg.dst_info.data_width,
1618 d40c->dma_cfg.dst_info.psize, 1669 d40c->dma_cfg.dst_info.psize);
1619 true);
1620 1670
1621 if (res < 0) 1671 if (res < 0)
1622 goto err; 1672 goto err;
@@ -1633,6 +1683,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1633 1683
1634 return &d40d->txd; 1684 return &d40d->txd;
1635err: 1685err:
1686 if (d40d)
1687 d40_desc_free(d40c, d40d);
1636 spin_unlock_irqrestore(&d40c->lock, flags); 1688 spin_unlock_irqrestore(&d40c->lock, flags);
1637 return NULL; 1689 return NULL;
1638} 1690}
@@ -1673,6 +1725,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
1673 * use default configuration (memcpy) 1725 * use default configuration (memcpy)
1674 */ 1726 */
1675 if (d40c->dma_cfg.channel_type == 0) { 1727 if (d40c->dma_cfg.channel_type == 0) {
1728
1676 err = d40_config_memcpy(d40c); 1729 err = d40_config_memcpy(d40c);
1677 if (err) { 1730 if (err) {
1678 dev_err(&d40c->chan.dev->device, 1731 dev_err(&d40c->chan.dev->device,
@@ -1712,14 +1765,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
1712 * resource is free. In case of multiple logical channels 1765 * resource is free. In case of multiple logical channels
1713 * on the same physical resource, only the first write is necessary. 1766 * on the same physical resource, only the first write is necessary.
1714 */ 1767 */
1715 if (is_free_phy) { 1768 if (is_free_phy)
1716 err = d40_config_write(d40c); 1769 d40_config_write(d40c);
1717 if (err) {
1718 dev_err(&d40c->chan.dev->device,
1719 "[%s] Failed to configure channel\n",
1720 __func__);
1721 }
1722 }
1723fail: 1770fail:
1724 spin_unlock_irqrestore(&d40c->lock, flags); 1771 spin_unlock_irqrestore(&d40c->lock, flags);
1725 return err; 1772 return err;
@@ -1790,23 +1837,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1790 goto err; 1837 goto err;
1791 } 1838 }
1792 d40d->lli_len = 1; 1839 d40d->lli_len = 1;
1793 d40d->lli_tx_len = 1; 1840 d40d->lli_current = 0;
1794 1841
1795 d40_log_fill_lli(d40d->lli_log.src, 1842 d40_log_fill_lli(d40d->lli_log.src,
1796 src, 1843 src,
1797 size, 1844 size,
1798 0,
1799 d40c->log_def.lcsp1, 1845 d40c->log_def.lcsp1,
1800 d40c->dma_cfg.src_info.data_width, 1846 d40c->dma_cfg.src_info.data_width,
1801 false, true); 1847 true);
1802 1848
1803 d40_log_fill_lli(d40d->lli_log.dst, 1849 d40_log_fill_lli(d40d->lli_log.dst,
1804 dst, 1850 dst,
1805 size, 1851 size,
1806 0,
1807 d40c->log_def.lcsp3, 1852 d40c->log_def.lcsp3,
1808 d40c->dma_cfg.dst_info.data_width, 1853 d40c->dma_cfg.dst_info.data_width,
1809 true, true); 1854 true);
1810 1855
1811 } else { 1856 } else {
1812 1857
@@ -1851,8 +1896,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1851err_fill_lli: 1896err_fill_lli:
1852 dev_err(&d40c->chan.dev->device, 1897 dev_err(&d40c->chan.dev->device,
1853 "[%s] Failed filling in PHY LLI\n", __func__); 1898 "[%s] Failed filling in PHY LLI\n", __func__);
1854 d40_pool_lli_free(d40d);
1855err: 1899err:
1900 if (d40d)
1901 d40_desc_free(d40c, d40d);
1856 spin_unlock_irqrestore(&d40c->lock, flags); 1902 spin_unlock_irqrestore(&d40c->lock, flags);
1857 return NULL; 1903 return NULL;
1858} 1904}
@@ -1886,19 +1932,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1886 } 1932 }
1887 1933
1888 d40d->lli_len = sg_len; 1934 d40d->lli_len = sg_len;
1889 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) 1935 d40d->lli_current = 0;
1890 d40d->lli_tx_len = d40d->lli_len;
1891 else
1892 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1893
1894 if (sg_len > 1)
1895 /*
1896 * Check if there is space available in lcla.
1897 * If not, split list into 1-length and run only
1898 * in lcpa space.
1899 */
1900 if (d40_lcla_id_get(d40c) != 0)
1901 d40d->lli_tx_len = 1;
1902 1936
1903 if (direction == DMA_FROM_DEVICE) 1937 if (direction == DMA_FROM_DEVICE)
1904 if (d40c->runtime_addr) 1938 if (d40c->runtime_addr)
@@ -1914,16 +1948,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1914 else 1948 else
1915 return -EINVAL; 1949 return -EINVAL;
1916 1950
1917 total_size = d40_log_sg_to_dev(&d40c->lcla, 1951 total_size = d40_log_sg_to_dev(sgl, sg_len,
1918 sgl, sg_len,
1919 &d40d->lli_log, 1952 &d40d->lli_log,
1920 &d40c->log_def, 1953 &d40c->log_def,
1921 d40c->dma_cfg.src_info.data_width, 1954 d40c->dma_cfg.src_info.data_width,
1922 d40c->dma_cfg.dst_info.data_width, 1955 d40c->dma_cfg.dst_info.data_width,
1923 direction, 1956 direction,
1924 dma_flags & DMA_PREP_INTERRUPT, 1957 dev_addr);
1925 dev_addr, d40d->lli_tx_len,
1926 d40c->base->plat_data->llis_per_log);
1927 1958
1928 if (total_size < 0) 1959 if (total_size < 0)
1929 return -EINVAL; 1960 return -EINVAL;
@@ -1949,7 +1980,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1949 } 1980 }
1950 1981
1951 d40d->lli_len = sgl_len; 1982 d40d->lli_len = sgl_len;
1952 d40d->lli_tx_len = sgl_len; 1983 d40d->lli_current = 0;
1953 1984
1954 if (direction == DMA_FROM_DEVICE) { 1985 if (direction == DMA_FROM_DEVICE) {
1955 dst_dev_addr = 0; 1986 dst_dev_addr = 0;
@@ -1970,11 +2001,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1970 sgl_len, 2001 sgl_len,
1971 src_dev_addr, 2002 src_dev_addr,
1972 d40d->lli_phy.src, 2003 d40d->lli_phy.src,
1973 d40d->lli_phy.src_addr, 2004 virt_to_phys(d40d->lli_phy.src),
1974 d40c->src_def_cfg, 2005 d40c->src_def_cfg,
1975 d40c->dma_cfg.src_info.data_width, 2006 d40c->dma_cfg.src_info.data_width,
1976 d40c->dma_cfg.src_info.psize, 2007 d40c->dma_cfg.src_info.psize);
1977 true);
1978 if (res < 0) 2008 if (res < 0)
1979 return res; 2009 return res;
1980 2010
@@ -1982,11 +2012,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1982 sgl_len, 2012 sgl_len,
1983 dst_dev_addr, 2013 dst_dev_addr,
1984 d40d->lli_phy.dst, 2014 d40d->lli_phy.dst,
1985 d40d->lli_phy.dst_addr, 2015 virt_to_phys(d40d->lli_phy.dst),
1986 d40c->dst_def_cfg, 2016 d40c->dst_def_cfg,
1987 d40c->dma_cfg.dst_info.data_width, 2017 d40c->dma_cfg.dst_info.data_width,
1988 d40c->dma_cfg.dst_info.psize, 2018 d40c->dma_cfg.dst_info.psize);
1989 true);
1990 if (res < 0) 2019 if (res < 0)
1991 return res; 2020 return res;
1992 2021
@@ -2013,17 +2042,11 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2013 return ERR_PTR(-EINVAL); 2042 return ERR_PTR(-EINVAL);
2014 } 2043 }
2015 2044
2016 if (d40c->dma_cfg.pre_transfer)
2017 d40c->dma_cfg.pre_transfer(chan,
2018 d40c->dma_cfg.pre_transfer_data,
2019 sg_dma_len(sgl));
2020
2021 spin_lock_irqsave(&d40c->lock, flags); 2045 spin_lock_irqsave(&d40c->lock, flags);
2022 d40d = d40_desc_get(d40c); 2046 d40d = d40_desc_get(d40c);
2023 spin_unlock_irqrestore(&d40c->lock, flags);
2024 2047
2025 if (d40d == NULL) 2048 if (d40d == NULL)
2026 return NULL; 2049 goto err;
2027 2050
2028 if (d40c->log_num != D40_PHY_CHAN) 2051 if (d40c->log_num != D40_PHY_CHAN)
2029 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2052 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
@@ -2036,7 +2059,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2036 "[%s] Failed to prepare %s slave sg job: %d\n", 2059 "[%s] Failed to prepare %s slave sg job: %d\n",
2037 __func__, 2060 __func__,
2038 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); 2061 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2039 return NULL; 2062 goto err;
2040 } 2063 }
2041 2064
2042 d40d->txd.flags = dma_flags; 2065 d40d->txd.flags = dma_flags;
@@ -2045,7 +2068,14 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2045 2068
2046 d40d->txd.tx_submit = d40_tx_submit; 2069 d40d->txd.tx_submit = d40_tx_submit;
2047 2070
2071 spin_unlock_irqrestore(&d40c->lock, flags);
2048 return &d40d->txd; 2072 return &d40d->txd;
2073
2074err:
2075 if (d40d)
2076 d40_desc_free(d40c, d40d);
2077 spin_unlock_irqrestore(&d40c->lock, flags);
2078 return NULL;
2049} 2079}
2050 2080
2051static enum dma_status d40_tx_status(struct dma_chan *chan, 2081static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2178,14 +2208,25 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2178 return; 2208 return;
2179 } 2209 }
2180 2210
2181 if (config_maxburst >= 16) 2211 if (d40c->log_num != D40_PHY_CHAN) {
2182 psize = STEDMA40_PSIZE_LOG_16; 2212 if (config_maxburst >= 16)
2183 else if (config_maxburst >= 8) 2213 psize = STEDMA40_PSIZE_LOG_16;
2184 psize = STEDMA40_PSIZE_LOG_8; 2214 else if (config_maxburst >= 8)
2185 else if (config_maxburst >= 4) 2215 psize = STEDMA40_PSIZE_LOG_8;
2186 psize = STEDMA40_PSIZE_LOG_4; 2216 else if (config_maxburst >= 4)
2187 else 2217 psize = STEDMA40_PSIZE_LOG_4;
2188 psize = STEDMA40_PSIZE_LOG_1; 2218 else
2219 psize = STEDMA40_PSIZE_LOG_1;
2220 } else {
2221 if (config_maxburst >= 16)
2222 psize = STEDMA40_PSIZE_PHY_16;
2223 else if (config_maxburst >= 8)
2224 psize = STEDMA40_PSIZE_PHY_8;
2225 else if (config_maxburst >= 4)
2226 psize = STEDMA40_PSIZE_PHY_4;
2227 else
2228 psize = STEDMA40_PSIZE_PHY_1;
2229 }
2189 2230
2190 /* Set up all the endpoint configs */ 2231 /* Set up all the endpoint configs */
2191 cfg->src_info.data_width = addr_width; 2232 cfg->src_info.data_width = addr_width;
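The runtime configuration now derives the burst setting (psize) from maxburst separately for logical and physical channels; the threshold shape is the same in both branches. A standalone sketch of the mapping (the enum values below are placeholders for the STEDMA40_PSIZE_* constants, only the >= 16 / 8 / 4 cut-offs are taken from the code):

/* Model of the maxburst -> psize thresholds in d40_set_runtime_config();
 * PSIZE_* are illustrative placeholders, not the driver's definitions. */
#include <stdio.h>

enum psize { PSIZE_1, PSIZE_4, PSIZE_8, PSIZE_16 };

static enum psize psize_from_maxburst(int maxburst)
{
	if (maxburst >= 16)
		return PSIZE_16;
	else if (maxburst >= 8)
		return PSIZE_8;
	else if (maxburst >= 4)
		return PSIZE_4;
	return PSIZE_1;
}

int main(void)
{
	printf("maxburst 12 maps to enum value %d (PSIZE_8)\n",
	       psize_from_maxburst(12));
	return 0;
}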
@@ -2197,6 +2238,13 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2197 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; 2238 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
2198 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2239 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2199 2240
2241 /* Fill in register values */
2242 if (d40c->log_num != D40_PHY_CHAN)
2243 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2244 else
2245 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2246 &d40c->dst_def_cfg, false);
2247
2200 /* These settings will take precedence later */ 2248 /* These settings will take precedence later */
2201 d40c->runtime_addr = config_addr; 2249 d40c->runtime_addr = config_addr;
2202 d40c->runtime_direction = config->direction; 2250 d40c->runtime_direction = config->direction;
@@ -2259,10 +2307,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2259 d40c->base = base; 2307 d40c->base = base;
2260 d40c->chan.device = dma; 2308 d40c->chan.device = dma;
2261 2309
2262 /* Invalidate lcla element */
2263 d40c->lcla.src_id = -1;
2264 d40c->lcla.dst_id = -1;
2265
2266 spin_lock_init(&d40c->lock); 2310 spin_lock_init(&d40c->lock);
2267 2311
2268 d40c->log_num = D40_PHY_CHAN; 2312 d40c->log_num = D40_PHY_CHAN;
@@ -2404,9 +2448,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
2404 2448
2405 /* Mark disabled channels as occupied */ 2449 /* Mark disabled channels as occupied */
2406 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { 2450 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2407 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2451 int chan = base->plat_data->disabled_channels[i];
2408 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2452
2409 num_phy_chans_avail--; 2453 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2454 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2455 num_phy_chans_avail--;
2410 } 2456 }
2411 2457
2412 dev_info(base->dev, "%d of %d physical DMA channels available\n", 2458 dev_info(base->dev, "%d of %d physical DMA channels available\n",
@@ -2458,6 +2504,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2458 int num_phy_chans; 2504 int num_phy_chans;
2459 int i; 2505 int i;
2460 u32 val; 2506 u32 val;
2507 u32 rev;
2461 2508
2462 clk = clk_get(&pdev->dev, NULL); 2509 clk = clk_get(&pdev->dev, NULL);
2463 2510
@@ -2496,21 +2543,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2496 } 2543 }
2497 } 2544 }
2498 2545
2499 /* Get silicon revision */ 2546 /* Get silicon revision and designer */
2500 val = readl(virtbase + D40_DREG_PERIPHID2); 2547 val = readl(virtbase + D40_DREG_PERIPHID2);
2501 2548
2502 if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { 2549 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2550 D40_HW_DESIGNER) {
2503 dev_err(&pdev->dev, 2551 dev_err(&pdev->dev,
2504 "[%s] Unknown designer! Got %x wanted %x\n", 2552 "[%s] Unknown designer! Got %x wanted %x\n",
2505 __func__, val & 0xf, D40_PERIPHID2_DESIGNER); 2553 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2554 D40_HW_DESIGNER);
2506 goto failure; 2555 goto failure;
2507 } 2556 }
2508 2557
2558 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2559 D40_DREG_PERIPHID2_REV_POS;
2560
2509 /* The number of physical channels on this HW */ 2561 /* The number of physical channels on this HW */
2510 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2562 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2511 2563
2512 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2564 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2513 (val >> 4) & 0xf, res->start); 2565 rev, res->start);
2514 2566
2515 plat_data = pdev->dev.platform_data; 2567 plat_data = pdev->dev.platform_data;
2516 2568
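The probe path now decodes PERIPHID2 through named masks: the low nibble carries the designer ID and bits 7:4 the hardware revision, which is what later ends up in base->rev. A standalone decode of an example value (the register value below is made up):

/* Decode of the D40 PERIPHID2 fields as used in d40_hw_detect_init():
 * designer in bits 3:0, revision in bits 7:4.  0x38 is an example value. */
#include <stdio.h>
#include <stdint.h>

#define PERIPHID2_DESIGNER_MASK	0xfu
#define PERIPHID2_REV_POS	4
#define PERIPHID2_REV_MASK	(0xfu << PERIPHID2_REV_POS)

int main(void)
{
	uint32_t val = 0x38;

	printf("designer = 0x%x, revision = %u\n",
	       (unsigned)(val & PERIPHID2_DESIGNER_MASK),
	       (unsigned)((val & PERIPHID2_REV_MASK) >> PERIPHID2_REV_POS));
	return 0;
}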
@@ -2532,7 +2584,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2532 goto failure; 2584 goto failure;
2533 } 2585 }
2534 2586
2535 base->rev = (val >> 4) & 0xf; 2587 base->rev = rev;
2536 base->clk = clk; 2588 base->clk = clk;
2537 base->num_phy_chans = num_phy_chans; 2589 base->num_phy_chans = num_phy_chans;
2538 base->num_log_chans = num_log_chans; 2590 base->num_log_chans = num_log_chans;
@@ -2566,7 +2618,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2566 if (!base->lookup_log_chans) 2618 if (!base->lookup_log_chans)
2567 goto failure; 2619 goto failure;
2568 } 2620 }
2569 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), 2621
2622 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2623 sizeof(struct d40_desc *) *
2624 D40_LCLA_LINK_PER_EVENT_GRP,
2570 GFP_KERNEL); 2625 GFP_KERNEL);
2571 if (!base->lcla_pool.alloc_map) 2626 if (!base->lcla_pool.alloc_map)
2572 goto failure; 2627 goto failure;
@@ -2580,7 +2635,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2580 return base; 2635 return base;
2581 2636
2582failure: 2637failure:
2583 if (clk) { 2638 if (!IS_ERR(clk)) {
2584 clk_disable(clk); 2639 clk_disable(clk);
2585 clk_put(clk); 2640 clk_put(clk);
2586 } 2641 }
@@ -2717,8 +2772,10 @@ static int __init d40_lcla_allocate(struct d40_base *base)
2717 if (i < MAX_LCLA_ALLOC_ATTEMPTS) { 2772 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2718 base->lcla_pool.base = (void *)page_list[i]; 2773 base->lcla_pool.base = (void *)page_list[i];
2719 } else { 2774 } else {
2720 /* After many attempts, no succees with finding the correct 2775 /*
2721 * alignment try with allocating a big buffer */ 2776 * After many attempts and no success with finding the correct
2777 * alignment, try with allocating a big buffer.
2778 */
2722 dev_warn(base->dev, 2779 dev_warn(base->dev,
2723 "[%s] Failed to get %d pages @ 18 bit align.\n", 2780 "[%s] Failed to get %d pages @ 18 bit align.\n",
2724 __func__, base->lcla_pool.pages); 2781 __func__, base->lcla_pool.pages);
@@ -2811,8 +2868,6 @@ static int __init d40_probe(struct platform_device *pdev)
2811 2868
2812 spin_lock_init(&base->lcla_pool.lock); 2869 spin_lock_init(&base->lcla_pool.lock);
2813 2870
2814 base->lcla_pool.num_blocks = base->num_phy_chans;
2815
2816 base->irq = platform_get_irq(pdev, 0); 2871 base->irq = platform_get_irq(pdev, 0);
2817 2872
2818 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2873 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
@@ -2840,8 +2895,9 @@ failure:
2840 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 2895 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2841 free_pages((unsigned long)base->lcla_pool.base, 2896 free_pages((unsigned long)base->lcla_pool.base,
2842 base->lcla_pool.pages); 2897 base->lcla_pool.pages);
2843 if (base->lcla_pool.base_unaligned) 2898
2844 kfree(base->lcla_pool.base_unaligned); 2899 kfree(base->lcla_pool.base_unaligned);
2900
2845 if (base->phy_lcpa) 2901 if (base->phy_lcpa)
2846 release_mem_region(base->phy_lcpa, 2902 release_mem_region(base->phy_lcpa,
2847 base->lcpa_size); 2903 base->lcpa_size);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index d937f76d6e2e..86a306dbe1b4 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,10 +1,8 @@
1/* 1/*
2 * driver/dma/ste_dma40_ll.c 2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * 3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
4 * Copyright (C) ST-Ericsson 2007-2010 4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */ 6 */
9 7
10#include <linux/kernel.h> 8#include <linux/kernel.h>
@@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
39 cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 37 cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
40 l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; 38 l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
41 39
42 l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
43 l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; 40 l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
44 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; 41 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
45 l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; 42 l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
46 l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
47 43
48 l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; 44 l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
49 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 45 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
50 l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; 46 l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
51 l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
52 47
53 *lcsp1 = l1; 48 *lcsp1 = l1;
54 *lcsp3 = l3; 49 *lcsp3 = l3;
@@ -197,8 +192,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
197 dma_addr_t lli_phys, 192 dma_addr_t lli_phys,
198 u32 reg_cfg, 193 u32 reg_cfg,
199 u32 data_width, 194 u32 data_width,
200 int psize, 195 int psize)
201 bool term_int)
202{ 196{
203 int total_size = 0; 197 int total_size = 0;
204 int i; 198 int i;
@@ -238,7 +232,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
238 } 232 }
239 233
240 return total_size; 234 return total_size;
241 err: 235err:
242 return err; 236 return err;
243} 237}
244 238
@@ -271,11 +265,59 @@ void d40_phy_lli_write(void __iomem *virtbase,
271 265
272/* DMA logical lli operations */ 266/* DMA logical lli operations */
273 267
268static void d40_log_lli_link(struct d40_log_lli *lli_dst,
269 struct d40_log_lli *lli_src,
270 int next)
271{
272 u32 slos = 0;
273 u32 dlos = 0;
274
275 if (next != -EINVAL) {
276 slos = next * 2;
277 dlos = next * 2 + 1;
278 } else {
279 lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
280 lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
281 }
282
283 lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
284 (slos << D40_MEM_LCSP1_SLOS_POS);
285
286 lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
287 (dlos << D40_MEM_LCSP1_SLOS_POS);
288}
289
290void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
291 struct d40_log_lli *lli_dst,
292 struct d40_log_lli *lli_src,
293 int next)
294{
295 d40_log_lli_link(lli_dst, lli_src, next);
296
297 writel(lli_src->lcsp02, &lcpa[0].lcsp0);
298 writel(lli_src->lcsp13, &lcpa[0].lcsp1);
299 writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
300 writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
301}
302
303void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
304 struct d40_log_lli *lli_dst,
305 struct d40_log_lli *lli_src,
306 int next)
307{
308 d40_log_lli_link(lli_dst, lli_src, next);
309
310 writel(lli_src->lcsp02, &lcla[0].lcsp02);
311 writel(lli_src->lcsp13, &lcla[0].lcsp13);
312 writel(lli_dst->lcsp02, &lcla[1].lcsp02);
313 writel(lli_dst->lcsp13, &lcla[1].lcsp13);
314}
315
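d40_log_lli_link() above encodes the next LCLA slot into the SLOS/DLOS link fields: slot n becomes source link 2n and destination link 2n + 1, while a next of -EINVAL means the chain ends and the terminal/interrupt bits are set instead. The index encoding in isolation:

/* Model of the SLOS/DLOS encoding in d40_log_lli_link(): LCLA slot n maps
 * to source link 2n and destination link 2n + 1; -EINVAL terminates the
 * chain (in the driver the TIM/DTCP bits are set in that case). */
#include <stdio.h>
#include <errno.h>

static void encode_link(int next, unsigned int *slos, unsigned int *dlos)
{
	if (next == -EINVAL) {
		*slos = 0;	/* no next link */
		*dlos = 0;
	} else {
		*slos = (unsigned int)next * 2;
		*dlos = (unsigned int)next * 2 + 1;
	}
}

int main(void)
{
	unsigned int slos, dlos;

	encode_link(5, &slos, &dlos);
	printf("slot 5 -> slos = %u, dlos = %u\n", slos, dlos);	/* 10 and 11 */
	return 0;
}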
274void d40_log_fill_lli(struct d40_log_lli *lli, 316void d40_log_fill_lli(struct d40_log_lli *lli,
275 dma_addr_t data, u32 data_size, 317 dma_addr_t data, u32 data_size,
276 u32 lli_next_off, u32 reg_cfg, 318 u32 reg_cfg,
277 u32 data_width, 319 u32 data_width,
278 bool term_int, bool addr_inc) 320 bool addr_inc)
279{ 321{
280 lli->lcsp13 = reg_cfg; 322 lli->lcsp13 = reg_cfg;
281 323
@@ -290,165 +332,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
290 if (addr_inc) 332 if (addr_inc)
291 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; 333 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
292 334
293 lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
294 /* If this scatter list entry is the last one, no next link */
295 lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
296 D40_MEM_LCSP1_SLOS_MASK;
297
298 if (term_int)
299 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
300 else
301 lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
302} 335}
303 336
304int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, 337int d40_log_sg_to_dev(struct scatterlist *sg,
305 struct scatterlist *sg,
306 int sg_len, 338 int sg_len,
307 struct d40_log_lli_bidir *lli, 339 struct d40_log_lli_bidir *lli,
308 struct d40_def_lcsp *lcsp, 340 struct d40_def_lcsp *lcsp,
309 u32 src_data_width, 341 u32 src_data_width,
310 u32 dst_data_width, 342 u32 dst_data_width,
311 enum dma_data_direction direction, 343 enum dma_data_direction direction,
312 bool term_int, dma_addr_t dev_addr, int max_len, 344 dma_addr_t dev_addr)
313 int llis_per_log)
314{ 345{
315 int total_size = 0; 346 int total_size = 0;
316 struct scatterlist *current_sg = sg; 347 struct scatterlist *current_sg = sg;
317 int i; 348 int i;
318 u32 next_lli_off_dst = 0;
319 u32 next_lli_off_src = 0;
320 349
321 for_each_sg(sg, current_sg, sg_len, i) { 350 for_each_sg(sg, current_sg, sg_len, i) {
322 total_size += sg_dma_len(current_sg); 351 total_size += sg_dma_len(current_sg);
323 352
324 /*
325 * If this scatter list entry is the last one or
326 * max length, terminate link.
327 */
328 if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
329 next_lli_off_src = 0;
330 next_lli_off_dst = 0;
331 } else {
332 if (next_lli_off_dst == 0 &&
333 next_lli_off_src == 0) {
334 /* The first lli will be at next_lli_off */
335 next_lli_off_dst = (lcla->dst_id *
336 llis_per_log + 1);
337 next_lli_off_src = (lcla->src_id *
338 llis_per_log + 1);
339 } else {
340 next_lli_off_dst++;
341 next_lli_off_src++;
342 }
343 }
344
345 if (direction == DMA_TO_DEVICE) { 353 if (direction == DMA_TO_DEVICE) {
346 d40_log_fill_lli(&lli->src[i], 354 d40_log_fill_lli(&lli->src[i],
347 sg_phys(current_sg), 355 sg_phys(current_sg),
348 sg_dma_len(current_sg), 356 sg_dma_len(current_sg),
349 next_lli_off_src,
350 lcsp->lcsp1, src_data_width, 357 lcsp->lcsp1, src_data_width,
351 false,
352 true); 358 true);
353 d40_log_fill_lli(&lli->dst[i], 359 d40_log_fill_lli(&lli->dst[i],
354 dev_addr, 360 dev_addr,
355 sg_dma_len(current_sg), 361 sg_dma_len(current_sg),
356 next_lli_off_dst,
357 lcsp->lcsp3, dst_data_width, 362 lcsp->lcsp3, dst_data_width,
358 /* No next == terminal interrupt */
359 term_int && !next_lli_off_dst,
360 false); 363 false);
361 } else { 364 } else {
362 d40_log_fill_lli(&lli->dst[i], 365 d40_log_fill_lli(&lli->dst[i],
363 sg_phys(current_sg), 366 sg_phys(current_sg),
364 sg_dma_len(current_sg), 367 sg_dma_len(current_sg),
365 next_lli_off_dst,
366 lcsp->lcsp3, dst_data_width, 368 lcsp->lcsp3, dst_data_width,
367 /* No next == terminal interrupt */
368 term_int && !next_lli_off_dst,
369 true); 369 true);
370 d40_log_fill_lli(&lli->src[i], 370 d40_log_fill_lli(&lli->src[i],
371 dev_addr, 371 dev_addr,
372 sg_dma_len(current_sg), 372 sg_dma_len(current_sg),
373 next_lli_off_src,
374 lcsp->lcsp1, src_data_width, 373 lcsp->lcsp1, src_data_width,
375 false,
376 false); 374 false);
377 } 375 }
378 } 376 }
379 return total_size; 377 return total_size;
380} 378}
381 379
382int d40_log_sg_to_lli(int lcla_id, 380int d40_log_sg_to_lli(struct scatterlist *sg,
383 struct scatterlist *sg,
384 int sg_len, 381 int sg_len,
385 struct d40_log_lli *lli_sg, 382 struct d40_log_lli *lli_sg,
386 u32 lcsp13, /* src or dst*/ 383 u32 lcsp13, /* src or dst*/
387 u32 data_width, 384 u32 data_width)
388 bool term_int, int max_len, int llis_per_log)
389{ 385{
390 int total_size = 0; 386 int total_size = 0;
391 struct scatterlist *current_sg = sg; 387 struct scatterlist *current_sg = sg;
392 int i; 388 int i;
393 u32 next_lli_off = 0;
394 389
395 for_each_sg(sg, current_sg, sg_len, i) { 390 for_each_sg(sg, current_sg, sg_len, i) {
396 total_size += sg_dma_len(current_sg); 391 total_size += sg_dma_len(current_sg);
397 392
398 /*
399 * If this scatter list entry is the last one or
400 * max length, terminate link.
401 */
402 if (sg_len - 1 == i || ((i+1) % max_len == 0))
403 next_lli_off = 0;
404 else {
405 if (next_lli_off == 0)
406 /* The first lli will be at next_lli_off */
407 next_lli_off = lcla_id * llis_per_log + 1;
408 else
409 next_lli_off++;
410 }
411
412 d40_log_fill_lli(&lli_sg[i], 393 d40_log_fill_lli(&lli_sg[i],
413 sg_phys(current_sg), 394 sg_phys(current_sg),
414 sg_dma_len(current_sg), 395 sg_dma_len(current_sg),
415 next_lli_off,
416 lcsp13, data_width, 396 lcsp13, data_width,
417 term_int && !next_lli_off,
418 true); 397 true);
419 } 398 }
420 return total_size; 399 return total_size;
421} 400}
422
423int d40_log_lli_write(struct d40_log_lli_full *lcpa,
424 struct d40_log_lli *lcla_src,
425 struct d40_log_lli *lcla_dst,
426 struct d40_log_lli *lli_dst,
427 struct d40_log_lli *lli_src,
428 int llis_per_log)
429{
430 u32 slos;
431 u32 dlos;
432 int i;
433
434 writel(lli_src->lcsp02, &lcpa->lcsp0);
435 writel(lli_src->lcsp13, &lcpa->lcsp1);
436 writel(lli_dst->lcsp02, &lcpa->lcsp2);
437 writel(lli_dst->lcsp13, &lcpa->lcsp3);
438
439 slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
440 dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
441
442 for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
443 writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
444 writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
445 writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
446 writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
447
448 slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
449 dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
450 }
451
452 return i;
453
454}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9c0fa2f5fe57..37f81e84cd13 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -1,10 +1,8 @@
1/* 1/*
2 * driver/dma/ste_dma40_ll.h 2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * 3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA
4 * Copyright (C) ST-Ericsson 2007-2010 4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */ 6 */
9#ifndef STE_DMA40_LL_H 7#ifndef STE_DMA40_LL_H
10#define STE_DMA40_LL_H 8#define STE_DMA40_LL_H
@@ -163,6 +161,9 @@
163#define D40_DREG_PERIPHID0 0xFE0 161#define D40_DREG_PERIPHID0 0xFE0
164#define D40_DREG_PERIPHID1 0xFE4 162#define D40_DREG_PERIPHID1 0xFE4
165#define D40_DREG_PERIPHID2 0xFE8 163#define D40_DREG_PERIPHID2 0xFE8
164#define D40_DREG_PERIPHID2_REV_POS 4
165#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
166#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
166#define D40_DREG_PERIPHID3 0xFEC 167#define D40_DREG_PERIPHID3 0xFEC
167#define D40_DREG_CELLID0 0xFF0 168#define D40_DREG_CELLID0 0xFF0
168#define D40_DREG_CELLID1 0xFF4 169#define D40_DREG_CELLID1 0xFF4
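
The new PERIPHID2 field macros make the revision and designer fields of the PrimeCell-style peripheral ID block extractable by name. A minimal sketch of how a probe path could decode them — readl() and the virtbase pointer follow the usual kernel conventions, and only the D40_DREG_* names come from this patch:

	u32 pid2 = readl(virtbase + D40_DREG_PERIPHID2);
	u8 rev = (pid2 & D40_DREG_PERIPHID2_REV_MASK)
			>> D40_DREG_PERIPHID2_REV_POS;
	u8 designer = pid2 & D40_DREG_PERIPHID2_DESIGNER_MASK;

	/* rev can then gate revision-specific behaviour, and an
	 * unexpected designer value can abort the probe. */
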
@@ -199,8 +200,6 @@ struct d40_phy_lli {
199 * 200 *
200 * @src: Register settings for src channel. 201 * @src: Register settings for src channel.
201 * @dst: Register settings for dst channel. 202 * @dst: Register settings for dst channel.
202 * @dst_addr: Physical destination address.
203 * @src_addr: Physical source address.
204 * 203 *
205 * All DMA transfers have a source and a destination. 204 * All DMA transfers have a source and a destination.
206 */ 205 */
@@ -208,8 +207,6 @@ struct d40_phy_lli {
208struct d40_phy_lli_bidir { 207struct d40_phy_lli_bidir {
209 struct d40_phy_lli *src; 208 struct d40_phy_lli *src;
210 struct d40_phy_lli *dst; 209 struct d40_phy_lli *dst;
211 dma_addr_t dst_addr;
212 dma_addr_t src_addr;
213}; 210};
214 211
215 212
@@ -271,29 +268,16 @@ struct d40_def_lcsp {
271 u32 lcsp1; 268 u32 lcsp1;
272}; 269};
273 270
274/**
275 * struct d40_lcla_elem - Info for one LCA element.
276 *
277 * @src_id: logical channel src id
278 * @dst_id: logical channel dst id
279 * @src: LCPA formated src parameters
280 * @dst: LCPA formated dst parameters
281 *
282 */
283struct d40_lcla_elem {
284 int src_id;
285 int dst_id;
286 struct d40_log_lli *src;
287 struct d40_log_lli *dst;
288};
289
290/* Physical channels */ 271/* Physical channels */
291 272
292void d40_phy_cfg(struct stedma40_chan_cfg *cfg, 273void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
293 u32 *src_cfg, u32 *dst_cfg, bool is_log); 274 u32 *src_cfg,
275 u32 *dst_cfg,
276 bool is_log);
294 277
295void d40_log_cfg(struct stedma40_chan_cfg *cfg, 278void d40_log_cfg(struct stedma40_chan_cfg *cfg,
296 u32 *lcsp1, u32 *lcsp2); 279 u32 *lcsp1,
280 u32 *lcsp2);
297 281
298int d40_phy_sg_to_lli(struct scatterlist *sg, 282int d40_phy_sg_to_lli(struct scatterlist *sg,
299 int sg_len, 283 int sg_len,
@@ -302,8 +286,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
302 dma_addr_t lli_phys, 286 dma_addr_t lli_phys,
303 u32 reg_cfg, 287 u32 reg_cfg,
304 u32 data_width, 288 u32 data_width,
305 int psize, 289 int psize);
306 bool term_int);
307 290
308int d40_phy_fill_lli(struct d40_phy_lli *lli, 291int d40_phy_fill_lli(struct d40_phy_lli *lli,
309 dma_addr_t data, 292 dma_addr_t data,
@@ -323,35 +306,35 @@ void d40_phy_lli_write(void __iomem *virtbase,
323/* Logical channels */ 306/* Logical channels */
324 307
325void d40_log_fill_lli(struct d40_log_lli *lli, 308void d40_log_fill_lli(struct d40_log_lli *lli,
326 dma_addr_t data, u32 data_size, 309 dma_addr_t data,
327 u32 lli_next_off, u32 reg_cfg, 310 u32 data_size,
311 u32 reg_cfg,
328 u32 data_width, 312 u32 data_width,
329 bool term_int, bool addr_inc); 313 bool addr_inc);
330 314
331int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, 315int d40_log_sg_to_dev(struct scatterlist *sg,
332 struct scatterlist *sg,
333 int sg_len, 316 int sg_len,
334 struct d40_log_lli_bidir *lli, 317 struct d40_log_lli_bidir *lli,
335 struct d40_def_lcsp *lcsp, 318 struct d40_def_lcsp *lcsp,
336 u32 src_data_width, 319 u32 src_data_width,
337 u32 dst_data_width, 320 u32 dst_data_width,
338 enum dma_data_direction direction, 321 enum dma_data_direction direction,
339 bool term_int, dma_addr_t dev_addr, int max_len, 322 dma_addr_t dev_addr);
340 int llis_per_log); 323
341 324int d40_log_sg_to_lli(struct scatterlist *sg,
342int d40_log_lli_write(struct d40_log_lli_full *lcpa,
343 struct d40_log_lli *lcla_src,
344 struct d40_log_lli *lcla_dst,
345 struct d40_log_lli *lli_dst,
346 struct d40_log_lli *lli_src,
347 int llis_per_log);
348
349int d40_log_sg_to_lli(int lcla_id,
350 struct scatterlist *sg,
351 int sg_len, 325 int sg_len,
352 struct d40_log_lli *lli_sg, 326 struct d40_log_lli *lli_sg,
353 u32 lcsp13, /* src or dst*/ 327 u32 lcsp13, /* src or dst*/
354 u32 data_width, 328 u32 data_width);
355 bool term_int, int max_len, int llis_per_log); 329
330void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
331 struct d40_log_lli *lli_dst,
332 struct d40_log_lli *lli_src,
333 int next);
334
335void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
336 struct d40_log_lli *lli_dst,
337 struct d40_log_lli *lli_src,
338 int next);
356 339
357#endif /* STE_DMA40_LLI_H */ 340#endif /* STE_DMA40_LLI_H */
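
The two new writers split what d40_log_lli_write() used to do in one pass: one call programs a destination/source LLI pair into the channel's LCPA slot, the other stores a pair into LCLA. A hedged sketch of how a caller in ste_dma40.c might chain them — the array indices, the "next" value and all local names here are chosen purely for illustration:

	/* Program the first dst/src LLI pair directly into LCPA ... */
	d40_log_lli_lcpa_write(chan_lcpa,
			       &lli_log_dst[0], &lli_log_src[0], next);

	/* ... and park the following pair in LCLA, to be picked up via
	 * the link described by "next". */
	d40_log_lli_lcla_write(chan_lcla,
			       &lli_log_dst[1], &lli_log_src[1], next);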