author	Linus Walleij <linus.walleij@stericsson.com>	2010-09-28 09:57:37 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-09-29 19:13:51 -0400
commit	e8689e63d4d2046079f2db9d494ac05c6885ac0c (patch)
tree	29196d65697acc7fd49af9e00f2068413e29b771
parent	b30a3f6257ed2105259b404d419b4964e363928c (diff)
dmaengine: driver for the ARM PL080/PL081 PrimeCells v5
This creates a DMAengine driver for the ARM PL080/PL081 PrimeCells based
on the implementation earlier submitted by Peter Pearse. This is working
like a charm for memcpy and slave DMA to the PL011 PrimeCell on the
PB11MPCore.

This DMA controller is used in mostly unmodified form in the ARM RealView
and Versatile platforms, in the ST-Ericsson Nomadik, and in the ST SPEAr
platform.

It has been converted to use the header from the Samsung PL080 derivative
instead of its own definitions. The Samsungs have a custom driver in
their mach-* folders though; at least we can share the register
definitions.

Cc: Peter Pearse <peter.pearse@arm.com>
Cc: Ben Dooks <ben-linux@fluff.org>
Cc: Kukjin Kim <kgene.kim@samsung.com>
Cc: Alessandro Rubini <rubini@unipv.it>
Acked-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
[GFP_KERNEL to GFP_NOWAIT in pl08x_prep_dma_memcpy]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
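For reference, a minimal memcpy client of this driver would look roughly
like the sketch below. This is not part of the patch, just the standard
dmaengine calls of this kernel generation with error handling omitted;
note that tx_submit() must directly follow the prep() call, since the
driver holds the channel lock in between (see the NB comments in the
code):

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);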
Diffstat:
-rw-r--r--	drivers/dma/Kconfig	8
-rw-r--r--	drivers/dma/Makefile	1
-rw-r--r--	drivers/dma/amba-pl08x.c	2167
-rw-r--r--	include/linux/amba/pl08x.h	222
4 files changed, 2398 insertions, 0 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..f82ef10a8361 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -49,6 +49,14 @@ config INTEL_MID_DMAC
49config ASYNC_TX_DISABLE_CHANNEL_SWITCH
50	bool
51
52config AMBA_PL08X
53 bool "ARM PrimeCell PL080 or PL081 support"
54 depends on ARM_AMBA && EXPERIMENTAL
55 select DMA_ENGINE
56 help
57 Platform has a PL08x DMAC device
58 which can provide DMA engine support
59
60config INTEL_IOATDMA
61	tristate "Intel I/OAT DMA support"
62	depends on PCI && X86
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..0b690e7e4384 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
25obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
26obj-$(CONFIG_PL330_DMA) += pl330.o
27obj-$(CONFIG_PCH_DMA) += pch_dma.o
28obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
new file mode 100644
index 000000000000..b605cc9ac3a2
--- /dev/null
+++ b/drivers/dma/amba-pl08x.c
@@ -0,0 +1,2167 @@
1/*
2 * Copyright (c) 2006 ARM Ltd.
3 * Copyright (c) 2010 ST-Ericsson SA
4 *
5 * Author: Peter Pearse <peter.pearse@arm.com>
6 * Author: Linus Walleij <linus.walleij@stericsson.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * The full GNU General Public License is in this distribution in the
23 * file called COPYING.
24 *
25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081
27 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
29 * any channel.
30 *
31 * The PL080 has 8 channels available for simultaneous use, and the PL081
32 * has only two channels. So on these DMA controllers the number of channels
33 * and the number of incoming DMA signals are two totally different things.
34 * It is usually not even theoretically possible to handle all physical
35 * signals, so a multiplexing scheme with possible denial of use is necessary.
36 *
37 * The PL080 has dual bus masters, the PL081 has a single master.
38 *
39 * Memory to peripheral transfer may be visualized as
40 * Get data from memory to DMAC
41 * Until no data left
42 * On burst request from peripheral
43 * Destination burst from DMAC to peripheral
44 * Clear burst request
45 * Raise terminal count interrupt
46 *
47 * For peripherals with a FIFO:
48 * Source burst size == half the depth of the peripheral FIFO
49 * Destination burst size == the depth of the peripheral FIFO
50 *
51 * (Bursts are irrelevant for mem to mem transfers - there are no burst
52 * signals, the DMA controller will simply facilitate its AHB master.)
53 *
54 * ASSUMES default (little) endianness for DMA transfers
55 *
56 * Only DMAC flow control is implemented
57 *
58 * Global TODO:
59 * - Break out common code from arch/arm/mach-s3c64xx and share
60 */
61#include <linux/device.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/pci.h>
65#include <linux/interrupt.h>
66#include <linux/slab.h>
67#include <linux/dmapool.h>
68#include <linux/amba/bus.h>
69#include <linux/dmaengine.h>
70#include <linux/amba/pl08x.h>
71#include <linux/debugfs.h>
72#include <linux/seq_file.h>
73
74#include <asm/hardware/pl080.h>
75#include <asm/dma.h>
76#include <asm/mach/dma.h>
77#include <asm/atomic.h>
78#include <asm/processor.h>
79#include <asm/cacheflush.h>
80
81#define DRIVER_NAME "pl08xdmac"
82
83/**
84 * struct vendor_data - vendor-specific config parameters
85 * for PL08x derivatives
86 * @name: the name of this specific variant
87 * @channels: the number of channels available in this variant
88 * @dualmaster: whether this version supports dual AHB masters
89 * or not.
90 */
91struct vendor_data {
92 char *name;
93 u8 channels;
94 bool dualmaster;
95};
96
97/*
98 * PL08X private data structures
99 * An LLI struct - see pl08x TRM
100 * Note that next uses bit[0] as a bus bit,
101 * start & end do not - their bus bit info
102 * is in cctl
103 */
104struct lli {
105 dma_addr_t src;
106 dma_addr_t dst;
107 dma_addr_t next;
108 u32 cctl;
109};
110
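/*
 * Editorial sketch, not part of the patch: how a two-element chain of
 * these LLIs ends up linked by pl08x_fill_llis_for_desc() below.
 * llis_va is the CPU view and llis_bus the bus (DMA) address of the
 * same array, both from dma_pool_alloc(); a next pointer of 0
 * terminates the chain and the last element raises the TC interrupt.
 */
static __maybe_unused void lli_chain_example(struct lli *llis_va,
					     dma_addr_t llis_bus)
{
	llis_va[0].next = llis_bus + sizeof(struct lli); /* point at LLI 1 */
	llis_va[1].next = 0;				 /* end of chain */
	llis_va[1].cctl |= PL080_CONTROL_TC_IRQ_EN;	 /* IRQ on last LLI */
}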
111/**
112 * struct pl08x_driver_data - the local state holder for the PL08x
113 * @slave: slave engine for this instance
114 * @memcpy: memcpy engine for this instance
115 * @base: virtual memory base (remapped) for the PL08x
116 * @adev: the corresponding AMBA (PrimeCell) bus entry
117 * @vd: vendor data for this PL08x variant
118 * @pd: platform data passed in from the platform/machine
119 * @phy_chans: array of data for the physical channels
120 * @pool: a pool for the LLI descriptors
121 * @pool_ctr: counter of LLIs in the pool
122 * @lock: a spinlock for this struct
123 */
124struct pl08x_driver_data {
125 struct dma_device slave;
126 struct dma_device memcpy;
127 void __iomem *base;
128 struct amba_device *adev;
129 struct vendor_data *vd;
130 struct pl08x_platform_data *pd;
131 struct pl08x_phy_chan *phy_chans;
132 struct dma_pool *pool;
133 int pool_ctr;
134 spinlock_t lock;
135};
136
137/*
138 * PL08X specific defines
139 */
140
141/*
142 * Memory boundaries: the manual for PL08x says that the controller
143 * cannot read past a 1KiB boundary, so these defines are used to
144 * create transfer LLIs that do not cross such boundaries.
145 */
146#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
147#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
148
149/* Minimum period between work queue runs */
150#define PL08X_WQ_PERIODMIN 20
151
152/* Size (bytes) of each LLI buffer allocated for one transfer */
153#define PL08X_LLI_TSFR_SIZE 0x2000
154
155/* Maximum times we call dma_pool_alloc on this pool without freeing */
156#define PL08X_MAX_ALLOCS 0x40
157#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
158#define PL08X_ALIGN 8
159
160static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
161{
162 return container_of(chan, struct pl08x_dma_chan, chan);
163}
164
165/*
166 * Physical channel handling
167 */
168
169/* Whether a certain channel is busy or not */
170static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
171{
172 unsigned int val;
173
174 val = readl(ch->base + PL080_CH_CONFIG);
175 return val & PL080_CONFIG_ACTIVE;
176}
177
178/*
179 * Set the initial DMA register values i.e. those for the first LLI
180 * The next lli pointer and the configuration interrupt bit have
181 * been set when the LLIs were constructed
182 */
183static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
184 struct pl08x_phy_chan *ch)
185{
186 /* Wait for channel inactive */
187 while (pl08x_phy_channel_busy(ch))
188 ;
189
190 dev_vdbg(&pl08x->adev->dev,
191 "WRITE channel %d: csrc=%08x, cdst=%08x, "
192 "cctl=%08x, clli=%08x, ccfg=%08x\n",
193 ch->id,
194 ch->csrc,
195 ch->cdst,
196 ch->cctl,
197 ch->clli,
198 ch->ccfg);
199
200 writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
201 writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
202 writel(ch->clli, ch->base + PL080_CH_LLI);
203 writel(ch->cctl, ch->base + PL080_CH_CONTROL);
204 writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
205}
206
207static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
208{
209 struct pl08x_channel_data *cd = plchan->cd;
210 struct pl08x_phy_chan *phychan = plchan->phychan;
211 struct pl08x_txd *txd = plchan->at;
212
213 /* Copy the basic control register calculated at transfer config */
214 phychan->csrc = txd->csrc;
215 phychan->cdst = txd->cdst;
216 phychan->clli = txd->clli;
217 phychan->cctl = txd->cctl;
218
219 /* Assign the signal to the proper control registers */
220 phychan->ccfg = cd->ccfg;
221 phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
222 phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
223 /* If it wasn't set from AMBA, ignore it */
224 if (txd->direction == DMA_TO_DEVICE)
225 /* Select signal as destination */
226 phychan->ccfg |=
227 (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
228 else if (txd->direction == DMA_FROM_DEVICE)
229 /* Select signal as source */
230 phychan->ccfg |=
231 (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
232 /* Always enable error interrupts */
233 phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
234 /* Always enable terminal interrupts */
235 phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
236}
237
238/*
239 * Enable the DMA channel
240 * Assumes all other configuration bits have been set
241 * as desired before this code is called
242 */
243static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
244 struct pl08x_phy_chan *ch)
245{
246 u32 val;
247
248 /*
249 * Do not access config register until channel shows as disabled
250 */
251 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
252 ;
253
254 /*
255 * Do not access config register until channel shows as inactive
256 */
257 val = readl(ch->base + PL080_CH_CONFIG);
258 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
259 val = readl(ch->base + PL080_CH_CONFIG);
260
261 writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
262}
263
264/*
265 * Overall DMAC remains enabled always.
266 *
267 * Disabling individual channels could lose data.
268 *
269 * Disable the peripheral DMA after disabling the DMAC
270 * in order to allow the DMAC FIFO to drain, and
271 * hence allow the channel to show inactive
272 *
273 */
274static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
275{
276 u32 val;
277
278 /* Set the HALT bit and wait for the FIFO to drain */
279 val = readl(ch->base + PL080_CH_CONFIG);
280 val |= PL080_CONFIG_HALT;
281 writel(val, ch->base + PL080_CH_CONFIG);
282
283 /* Wait for channel inactive */
284 while (pl08x_phy_channel_busy(ch))
285 ;
286}
287
288static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
289{
290 u32 val;
291
292 /* Clear the HALT bit */
293 val = readl(ch->base + PL080_CH_CONFIG);
294 val &= ~PL080_CONFIG_HALT;
295 writel(val, ch->base + PL080_CH_CONFIG);
296}
297
298
299/* Stops the channel */
300static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
301{
302 u32 val;
303
304 pl08x_pause_phy_chan(ch);
305
306 /* Disable channel */
307 val = readl(ch->base + PL080_CH_CONFIG);
308 val &= ~PL080_CONFIG_ENABLE;
309 val &= ~PL080_CONFIG_ERR_IRQ_MASK;
310 val &= ~PL080_CONFIG_TC_IRQ_MASK;
311 writel(val, ch->base + PL080_CH_CONFIG);
312}
313
314static inline u32 get_bytes_in_cctl(u32 cctl)
315{
316 /* The source width defines the number of bytes */
317 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
318
319 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
320 case PL080_WIDTH_8BIT:
321 break;
322 case PL080_WIDTH_16BIT:
323 bytes *= 2;
324 break;
325 case PL080_WIDTH_32BIT:
326 bytes *= 4;
327 break;
328 }
329 return bytes;
330}
331
332/* The channel should be paused when calling this */
333static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
334{
335 struct pl08x_phy_chan *ch;
336 struct pl08x_txd *txdi = NULL;
337 struct pl08x_txd *txd;
338 unsigned long flags;
339 u32 bytes = 0;
340
341 spin_lock_irqsave(&plchan->lock, flags);
342
343 ch = plchan->phychan;
344 txd = plchan->at;
345
346 /*
347 * Next follow the LLIs to get the number of pending bytes in the
348 * currently active transaction.
349 */
350 if (ch && txd) {
351 struct lli *llis_va = txd->llis_va;
352 struct lli *llis_bus = (struct lli *) txd->llis_bus;
353 u32 clli = readl(ch->base + PL080_CH_LLI);
354
355 /* First get the bytes in the current active LLI */
356 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
357
358 if (clli) {
359 int i = 0;
360
361 /* Forward to the LLI pointed to by clli */
362 while ((clli != (u32) &(llis_bus[i])) &&
363 (i < MAX_NUM_TSFR_LLIS))
364 i++;
365
366 while (clli) {
367 bytes += get_bytes_in_cctl(llis_va[i].cctl);
368 /*
369 * A clli of 0x00000000 will terminate the
370 * LLI list
371 */
372 clli = llis_va[i].next;
373 i++;
374 }
375 }
376 }
377
378 /* Sum up all queued transactions */
379 if (!list_empty(&plchan->desc_list)) {
380 list_for_each_entry(txdi, &plchan->desc_list, node) {
381 bytes += txdi->len;
382 }
383
384 }
385
386 spin_unlock_irqrestore(&plchan->lock, flags);
387
388 return bytes;
389}
390
391/*
392 * Allocate a physical channel for a virtual channel
393 */
394static struct pl08x_phy_chan *
395pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
396 struct pl08x_dma_chan *virt_chan)
397{
398 struct pl08x_phy_chan *ch = NULL;
399 unsigned long flags;
400 int i;
401
402 /*
403 * Try to locate a physical channel to be used for
404 * this transfer. If all are taken return NULL and
405 * the requester will have to cope by using some fallback
406 * PIO mode or retrying later.
407 */
408 for (i = 0; i < pl08x->vd->channels; i++) {
409 ch = &pl08x->phy_chans[i];
410
411 spin_lock_irqsave(&ch->lock, flags);
412
413 if (!ch->serving) {
414 ch->serving = virt_chan;
415 ch->signal = -1;
416 spin_unlock_irqrestore(&ch->lock, flags);
417 break;
418 }
419
420 spin_unlock_irqrestore(&ch->lock, flags);
421 }
422
423 if (i == pl08x->vd->channels) {
424 /* No physical channel available, cope with it */
425 return NULL;
426 }
427
428 return ch;
429}
430
431static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
432 struct pl08x_phy_chan *ch)
433{
434 unsigned long flags;
435
436 /* Stop the channel and clear its interrupts */
437 pl08x_stop_phy_chan(ch);
438 writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
439 writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
440
441 /* Mark it as free */
442 spin_lock_irqsave(&ch->lock, flags);
443 ch->serving = NULL;
444 spin_unlock_irqrestore(&ch->lock, flags);
445}
446
447/*
448 * LLI handling
449 */
450
451static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
452{
453 switch (coded) {
454 case PL080_WIDTH_8BIT:
455 return 1;
456 case PL080_WIDTH_16BIT:
457 return 2;
458 case PL080_WIDTH_32BIT:
459 return 4;
460 default:
461 break;
462 }
463 BUG();
464 return 0;
465}
466
467static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
468 u32 tsize)
469{
470 u32 retbits = cctl;
471
472 /* Remove all src, dst and transfersize bits */
473 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
474 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
475 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
476
477 /* Then set the bits according to the parameters */
478 switch (srcwidth) {
479 case 1:
480 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
481 break;
482 case 2:
483 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
484 break;
485 case 4:
486 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
487 break;
488 default:
489 BUG();
490 break;
491 }
492
493 switch (dstwidth) {
494 case 1:
495 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
496 break;
497 case 2:
498 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
499 break;
500 case 4:
501 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
502 break;
503 default:
504 BUG();
505 break;
506 }
507
508 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
509 return retbits;
510}
511
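/*
 * Editorial example, not part of the patch: encode/decode round trip.
 * With 32-bit source and destination widths, a transfer size of 0x10
 * moves 0x10 * 4 = 64 bytes, which is what get_bytes_in_cctl() above
 * recovers from the source width and transfer size fields.
 */
static __maybe_unused void cctl_roundtrip_example(void)
{
	u32 cctl = pl08x_cctl_bits(0, 4, 4, 0x10);

	BUG_ON(get_bytes_in_cctl(cctl) != 64);
}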
512/*
513 * Autoselect a master bus to use for the transfer.
514 * This prefers the destination bus if both are available;
515 * if one bus has a fixed address, the other will be chosen.
516 */
517void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
518 struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
519 struct pl08x_bus_data **sbus, u32 cctl)
520{
521 if (!(cctl & PL080_CONTROL_DST_INCR)) {
522 *mbus = src_bus;
523 *sbus = dst_bus;
524 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
525 *mbus = dst_bus;
526 *sbus = src_bus;
527 } else {
528 if (dst_bus->buswidth == 4) {
529 *mbus = dst_bus;
530 *sbus = src_bus;
531 } else if (src_bus->buswidth == 4) {
532 *mbus = src_bus;
533 *sbus = dst_bus;
534 } else if (dst_bus->buswidth == 2) {
535 *mbus = dst_bus;
536 *sbus = src_bus;
537 } else if (src_bus->buswidth == 2) {
538 *mbus = src_bus;
539 *sbus = dst_bus;
540 } else {
541 /* src_bus->buswidth == 1 */
542 *mbus = dst_bus;
543 *sbus = src_bus;
544 }
545 }
546}
547
548/*
549 * Fills in one LLI for a certain transfer descriptor
550 * and advances the counter
551 */
552int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
553 struct pl08x_txd *txd, int num_llis, int len,
554 u32 cctl, u32 *remainder)
555{
556 struct lli *llis_va = txd->llis_va;
557 struct lli *llis_bus = (struct lli *) txd->llis_bus;
558
559 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
560
561 llis_va[num_llis].cctl = cctl;
562 llis_va[num_llis].src = txd->srcbus.addr;
563 llis_va[num_llis].dst = txd->dstbus.addr;
564
565 /*
566 * On versions with dual masters, you can optionally AND on
567 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
568 * in new LLIs with that controller, but we always try to
569 * choose AHB1 to point into memory. The idea is to have AHB2
570 * fixed on the peripheral and AHB1 messing around in the
571 * memory. So we don't manipulate this bit currently.
572 */
573
574 llis_va[num_llis].next =
575 (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
576
577 if (cctl & PL080_CONTROL_SRC_INCR)
578 txd->srcbus.addr += len;
579 if (cctl & PL080_CONTROL_DST_INCR)
580 txd->dstbus.addr += len;
581
582 *remainder -= len;
583
584 return num_llis + 1;
585}
586
587/*
588 * Return number of bytes to fill to boundary, or len
589 */
590static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
591{
592 u32 boundary;
593
594 boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
595 << PL08X_BOUNDARY_SHIFT;
596
597 if (boundary < addr + len)
598 return boundary - addr;
599 else
600 return len;
601}
602
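/*
 * Editorial example, not part of the patch: worked arithmetic for the
 * 1 KiB rule above. 0x103f0 is 0x10 bytes below the next boundary at
 * 0x10400, so only 0x10 of the requested 0x40 bytes fit; a transfer
 * that does not reach a boundary gets its full length back.
 */
static __maybe_unused void pre_boundary_example(void)
{
	BUG_ON(pl08x_pre_boundary(0x103f0, 0x40) != 0x10);
	BUG_ON(pl08x_pre_boundary(0x10000, 0x40) != 0x40);
}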
603/*
604 * This fills in the table of LLIs for the transfer descriptor
605 * Note that we assume we never have to change the burst sizes
606 * Return 0 for error
607 */
608static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
609 struct pl08x_txd *txd)
610{
611 struct pl08x_channel_data *cd = txd->cd;
612 struct pl08x_bus_data *mbus, *sbus;
613 u32 remainder;
614 int num_llis = 0;
615 u32 cctl;
616 int max_bytes_per_lli;
617 int total_bytes = 0;
618 struct lli *llis_va;
619 struct lli *llis_bus;
620
621 if (!txd) {
622 dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
623 return 0;
624 }
625
626 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
627 &txd->llis_bus);
628 if (!txd->llis_va) {
629 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
630 return 0;
631 }
632
633 pl08x->pool_ctr++;
634
635 /*
636 * Initialize bus values for this transfer
637 * from the passed optimal values
638 */
639 if (!cd) {
640 dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
641 return 0;
642 }
643
644 /* Get the default CCTL from the platform data */
645 cctl = cd->cctl;
646
647 /*
648 * On the PL080 we have two bus masters and we
649 * should select one for source and one for
650 * destination. We try to use AHB2 for the
651 * bus which does not increment (typically the
652 * peripheral) else we just choose something.
653 */
654 cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
655 if (pl08x->vd->dualmaster) {
656 if (cctl & PL080_CONTROL_SRC_INCR)
657 /* Source increments, use AHB2 for destination */
658 cctl |= PL080_CONTROL_DST_AHB2;
659 else if (cctl & PL080_CONTROL_DST_INCR)
660 /* Destination increments, use AHB2 for source */
661 cctl |= PL080_CONTROL_SRC_AHB2;
662 else
663 /* Just pick something, source AHB1 dest AHB2 */
664 cctl |= PL080_CONTROL_DST_AHB2;
665 }
666
667 /* Find maximum width of the source bus */
668 txd->srcbus.maxwidth =
669 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
670 PL080_CONTROL_SWIDTH_SHIFT);
671
672 /* Find maximum width of the destination bus */
673 txd->dstbus.maxwidth =
674 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
675 PL080_CONTROL_DWIDTH_SHIFT);
676
677 /* Set up the bus widths to the maximum */
678 txd->srcbus.buswidth = txd->srcbus.maxwidth;
679 txd->dstbus.buswidth = txd->dstbus.maxwidth;
680 dev_vdbg(&pl08x->adev->dev,
681 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
682 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
683
684
685 /*
686 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
687 */
688 max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
689 PL080_CONTROL_TRANSFER_SIZE_MASK;
690 dev_vdbg(&pl08x->adev->dev,
691 "%s max bytes per lli = %d\n",
692 __func__, max_bytes_per_lli);
693
694 /* We need to count this down to zero */
695 remainder = txd->len;
696 dev_vdbg(&pl08x->adev->dev,
697 "%s remainder = %d\n",
698 __func__, remainder);
699
700 /*
701 * Choose bus to align to
702 * - prefers destination bus if both available
703 * - if one bus has a fixed address, chooses the other
704 * - modifies cctl to choose an appropriate master
705 */
706 pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
707 &mbus, &sbus, cctl);
708
709
710 /*
711 * The lowest bit of the LLI register
712 * is also used to indicate which master to
713 * use for reading the LLIs.
714 */
715
716 if (txd->len < mbus->buswidth) {
717 /*
718 * Less than a bus width available
719 * - send as single bytes
720 */
721 while (remainder) {
722 dev_vdbg(&pl08x->adev->dev,
723 "%s single byte LLIs for a transfer of "
724 "less than a bus width (remain %08x)\n",
725 __func__, remainder);
726 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
727 num_llis =
728 pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
729 cctl, &remainder);
730 total_bytes++;
731 }
732 } else {
733 /*
734 * Make one byte LLIs until master bus is aligned
735 * - slave will then be aligned also
736 */
737 while ((mbus->addr) % (mbus->buswidth)) {
738 dev_vdbg(&pl08x->adev->dev,
739 "%s adjustment lli for less than bus width "
740 "(remain %08x)\n",
741 __func__, remainder);
742 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
743 num_llis = pl08x_fill_lli_for_desc
744 (pl08x, txd, num_llis, 1, cctl, &remainder);
745 total_bytes++;
746 }
747
748 /*
749 * Master now aligned
750 * - if slave is not then we must set its width down
751 */
752 if (sbus->addr % sbus->buswidth) {
753 dev_dbg(&pl08x->adev->dev,
754 "%s set down bus width to one byte\n",
755 __func__);
756
757 sbus->buswidth = 1;
758 }
759
760 /*
761 * Make largest possible LLIs until less than one bus
762 * width left
763 */
764 while (remainder > (mbus->buswidth - 1)) {
765 int lli_len, target_len;
766 int tsize;
767 int odd_bytes;
768
769 /*
770 * If enough left try to send max possible,
771 * otherwise try to send the remainder
772 */
773 target_len = remainder;
774 if (remainder > max_bytes_per_lli)
775 target_len = max_bytes_per_lli;
776
777 /*
778 * Set bus lengths for incrementing busses
779 * to number of bytes which fill to next memory
780 * boundary
781 */
782 if (cctl & PL080_CONTROL_SRC_INCR)
783 txd->srcbus.fill_bytes =
784 pl08x_pre_boundary(
785 txd->srcbus.addr,
786 remainder);
787 else
788 txd->srcbus.fill_bytes =
789 max_bytes_per_lli;
790
791 if (cctl & PL080_CONTROL_DST_INCR)
792 txd->dstbus.fill_bytes =
793 pl08x_pre_boundary(
794 txd->dstbus.addr,
795 remainder);
796 else
797 txd->dstbus.fill_bytes =
798 max_bytes_per_lli;
799
800 /*
801 * Find the nearest: the smaller of the two fill lengths
802 */
803 lli_len = min(txd->srcbus.fill_bytes,
804 txd->dstbus.fill_bytes);
805
806 BUG_ON(lli_len > remainder);
807
808 if (lli_len <= 0) {
809 dev_err(&pl08x->adev->dev,
810 "%s lli_len is %d, <= 0\n",
811 __func__, lli_len);
812 return 0;
813 }
814
815 if (lli_len == target_len) {
816 /*
817 * Can send what we wanted
818 */
819 /*
820 * Maintain alignment
821 */
822 lli_len = (lli_len/mbus->buswidth) *
823 mbus->buswidth;
824 odd_bytes = 0;
825 } else {
826 /*
827 * So now we know how many bytes to transfer
828 * to get to the nearest boundary
829 * The next lli will go past the boundary
830 * - however we may be working to a boundary
831 * on the slave bus
832 * We need to ensure the master stays aligned
833 */
834 odd_bytes = lli_len % mbus->buswidth;
835 /*
836 * - and that we are working in multiples
837 * of the bus widths
838 */
839 lli_len -= odd_bytes;
840
841 }
842
843 if (lli_len) {
844 /*
845 * Check against minimum bus alignment:
846 * Calculate actual transfer size in relation
847 * to bus width and get a maximum remainder of
848 * the smallest bus width - 1
849 */
850 /* FIXME: use round_down()? */
851 tsize = lli_len / min(mbus->buswidth,
852 sbus->buswidth);
853 lli_len = tsize * min(mbus->buswidth,
854 sbus->buswidth);
855
856 if (target_len != lli_len) {
857 dev_vdbg(&pl08x->adev->dev,
858 "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
859 __func__, target_len, lli_len, txd->len);
860 }
861
862 cctl = pl08x_cctl_bits(cctl,
863 txd->srcbus.buswidth,
864 txd->dstbus.buswidth,
865 tsize);
866
867 dev_vdbg(&pl08x->adev->dev,
868 "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
869 __func__, lli_len, remainder);
870 num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
871 num_llis, lli_len, cctl,
872 &remainder);
873 total_bytes += lli_len;
874 }
875
876
877 if (odd_bytes) {
878 /*
879 * Creep past the boundary,
880 * maintaining master alignment
881 */
882 int j;
883 for (j = 0; (j < mbus->buswidth)
884 && (remainder); j++) {
885 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
886 dev_vdbg(&pl08x->adev->dev,
887 "%s align with boundardy, single byte (remain %08x)\n",
888 __func__, remainder);
889 num_llis =
890 pl08x_fill_lli_for_desc(pl08x,
891 txd, num_llis, 1,
892 cctl, &remainder);
893 total_bytes++;
894 }
895 }
896 }
897
898 /*
899 * Send any odd bytes
900 */
901 if (remainder < 0) {
902 dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
903 __func__, remainder);
904 return 0;
905 }
906
907 while (remainder) {
908 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
909 dev_vdbg(&pl08x->adev->dev,
910 "%s align with boundardy, single odd byte (remain %d)\n",
911 __func__, remainder);
912 num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
913 1, cctl, &remainder);
914 total_bytes++;
915 }
916 }
917 if (total_bytes != txd->len) {
918 dev_err(&pl08x->adev->dev,
919 "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
920 __func__, total_bytes, txd->len);
921 return 0;
922 }
923
924 if (num_llis >= MAX_NUM_TSFR_LLIS) {
925 dev_err(&pl08x->adev->dev,
926 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
927 __func__, (u32) MAX_NUM_TSFR_LLIS);
928 return 0;
929 }
930 /*
931 * Decide whether this is a loop or a terminated transfer
932 */
933 llis_va = txd->llis_va;
934 llis_bus = (struct lli *) txd->llis_bus;
935
936 if (cd->circular_buffer) {
937 /*
938 * Loop the circular buffer so that the next element
939 * points back to the beginning of the LLI.
940 */
941 llis_va[num_llis - 1].next =
942 (dma_addr_t)((unsigned int)&(llis_bus[0]));
943 } else {
944 /*
945 * On non-circular buffers, the final LLI terminates
946 * the LLI.
947 */
948 llis_va[num_llis - 1].next = 0;
949 /*
950 * The final LLI element shall also fire an interrupt
951 */
952 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
953 }
954
955 /* Now store the channel register values */
956 txd->csrc = llis_va[0].src;
957 txd->cdst = llis_va[0].dst;
958 if (num_llis > 1)
959 txd->clli = llis_va[0].next;
960 else
961 txd->clli = 0;
962
963 txd->cctl = llis_va[0].cctl;
964 /* ccfg will be set at physical channel allocation time */
965
966#ifdef VERBOSE_DEBUG
967 {
968 int i;
969
970 for (i = 0; i < num_llis; i++) {
971 dev_vdbg(&pl08x->adev->dev,
972 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
973 i,
974 &llis_va[i],
975 llis_va[i].src,
976 llis_va[i].dst,
977 llis_va[i].cctl,
978 llis_va[i].next
979 );
980 }
981 }
982#endif
983
984 return num_llis;
985}
986
987/* You should call this with the struct pl08x lock held */
988static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
989 struct pl08x_txd *txd)
990{
991 if (!txd)
992 dev_err(&pl08x->adev->dev,
993 "%s no descriptor to free\n",
994 __func__);
995
996 /* Free the LLI */
997 dma_pool_free(pl08x->pool, txd->llis_va,
998 txd->llis_bus);
999
1000 pl08x->pool_ctr--;
1001
1002 kfree(txd);
1003}
1004
1005static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1006 struct pl08x_dma_chan *plchan)
1007{
1008 struct pl08x_txd *txdi = NULL;
1009 struct pl08x_txd *next;
1010
1011 if (!list_empty(&plchan->desc_list)) {
1012 list_for_each_entry_safe(txdi,
1013 next, &plchan->desc_list, node) {
1014 list_del(&txdi->node);
1015 pl08x_free_txd(pl08x, txdi);
1016 }
1017
1018 }
1019}
1020
1021/*
1022 * The DMA ENGINE API
1023 */
1024static int pl08x_alloc_chan_resources(struct dma_chan *chan)
1025{
1026 return 0;
1027}
1028
1029static void pl08x_free_chan_resources(struct dma_chan *chan)
1030{
1031}
1032
1033/*
1034 * This should be called with the channel plchan->lock held
1035 */
1036static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1037 struct pl08x_txd *txd)
1038{
1039 struct pl08x_driver_data *pl08x = plchan->host;
1040 struct pl08x_phy_chan *ch;
1041 int ret;
1042
1043 /* Check if we already have a channel */
1044 if (plchan->phychan)
1045 return 0;
1046
1047 ch = pl08x_get_phy_channel(pl08x, plchan);
1048 if (!ch) {
1049 /* No physical channel available, cope with it */
1050 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
1051 return -EBUSY;
1052 }
1053
1054 /*
1055 * OK we have a physical channel: for memcpy() this is all we
1056 * need, but for slaves the physical signals may be muxed!
1057 * Can the platform allow us to use this channel?
1058 */
1059 if (plchan->slave &&
1060 ch->signal < 0 &&
1061 pl08x->pd->get_signal) {
1062 ret = pl08x->pd->get_signal(plchan);
1063 if (ret < 0) {
1064 dev_dbg(&pl08x->adev->dev,
1065 "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
1066 ch->id, plchan->name);
1067 /* Release physical channel & return */
1068 pl08x_put_phy_channel(pl08x, ch);
1069 return -EBUSY;
1070 }
1071 ch->signal = ret;
1072 }
1073
1074 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
1075 ch->id,
1076 ch->signal,
1077 plchan->name);
1078
1079 plchan->phychan = ch;
1080
1081 return 0;
1082}
1083
1084static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
1085{
1086 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
1087
1088 atomic_inc(&plchan->last_issued);
1089 tx->cookie = atomic_read(&plchan->last_issued);
1090 /* This unlock follows the lock in the prep() function */
1091 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
1092
1093 return tx->cookie;
1094}
1095
1096static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1097 struct dma_chan *chan, unsigned long flags)
1098{
1099 struct dma_async_tx_descriptor *retval = NULL;
1100
1101 return retval;
1102}
1103
1104/*
1105 * Code accessing dma_async_is_complete() in a tight loop
1106 * may cause problems - schedule() could be inserted where indicated.
1107 * If slaves are relying on interrupts to signal completion this
1108 * function must not be called with interrupts disabled
1109 */
1110static enum dma_status
1111pl08x_dma_tx_status(struct dma_chan *chan,
1112 dma_cookie_t cookie,
1113 struct dma_tx_state *txstate)
1114{
1115 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1116 dma_cookie_t last_used;
1117 dma_cookie_t last_complete;
1118 enum dma_status ret;
1119 u32 bytesleft = 0;
1120
1121 last_used = atomic_read(&plchan->last_issued);
1122 last_complete = plchan->lc;
1123
1124 ret = dma_async_is_complete(cookie, last_complete, last_used);
1125 if (ret == DMA_SUCCESS) {
1126 dma_set_tx_state(txstate, last_complete, last_used, 0);
1127 return ret;
1128 }
1129
1130 /*
1131 * schedule(); could be inserted here
1132 */
1133
1134 /*
1135 * This cookie not complete yet
1136 */
1137 last_used = atomic_read(&plchan->last_issued);
1138 last_complete = plchan->lc;
1139
1140 /* Get number of bytes left in the active transactions and queue */
1141 bytesleft = pl08x_getbytes_chan(plchan);
1142
1143 dma_set_tx_state(txstate, last_complete, last_used,
1144 bytesleft);
1145
1146 if (plchan->state == PL08X_CHAN_PAUSED)
1147 return DMA_PAUSED;
1148
1149 /* Whether waiting or running, we're in progress */
1150 return DMA_IN_PROGRESS;
1151}
1152
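/*
 * Editorial sketch, not part of the patch: polling a cookie from client
 * context. Per the comment above, never spin on this with interrupts
 * disabled if the slave relies on interrupts for completion.
 */
static __maybe_unused void poll_for_completion(struct dma_chan *chan,
					       dma_cookie_t cookie)
{
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
			!= DMA_SUCCESS)
		cond_resched();
}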
1153/* PrimeCell DMA extension */
1154struct burst_table {
1155 int burstwords;
1156 u32 reg;
1157};
1158
1159static const struct burst_table burst_sizes[] = {
1160 {
1161 .burstwords = 256,
1162 .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
1163 (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
1164 },
1165 {
1166 .burstwords = 128,
1167 .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
1168 (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
1169 },
1170 {
1171 .burstwords = 64,
1172 .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
1173 (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
1174 },
1175 {
1176 .burstwords = 32,
1177 .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
1178 (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
1179 },
1180 {
1181 .burstwords = 16,
1182 .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
1183 (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
1184 },
1185 {
1186 .burstwords = 8,
1187 .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
1188 (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
1189 },
1190 {
1191 .burstwords = 4,
1192 .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
1193 (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
1194 },
1195 {
1196 .burstwords = 1,
1197 .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
1198 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
1199 },
1200};
1201
1202static void dma_set_runtime_config(struct dma_chan *chan,
1203 struct dma_slave_config *config)
1204{
1205 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1206 struct pl08x_driver_data *pl08x = plchan->host;
1207 struct pl08x_channel_data *cd = plchan->cd;
1208 enum dma_slave_buswidth addr_width;
1209 u32 maxburst;
1210 u32 cctl = 0;
1211 /* Mask out all except src and dst channel */
1212 u32 ccfg = cd->ccfg & 0x000003DEU;
1213 int i = 0;
1214
1215 /* Transfer direction */
1216 plchan->runtime_direction = config->direction;
1217 if (config->direction == DMA_TO_DEVICE) {
1218 plchan->runtime_addr = config->dst_addr;
1219 cctl |= PL080_CONTROL_SRC_INCR;
1220 ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1221 addr_width = config->dst_addr_width;
1222 maxburst = config->dst_maxburst;
1223 } else if (config->direction == DMA_FROM_DEVICE) {
1224 plchan->runtime_addr = config->src_addr;
1225 cctl |= PL080_CONTROL_DST_INCR;
1226 ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1227 addr_width = config->src_addr_width;
1228 maxburst = config->src_maxburst;
1229 } else {
1230 dev_err(&pl08x->adev->dev,
1231 "bad runtime_config: alien transfer direction\n");
1232 return;
1233 }
1234
1235 switch (addr_width) {
1236 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1237 cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1238 (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
1239 break;
1240 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1241 cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1242 (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
1243 break;
1244 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1245 cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1246 (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
1247 break;
1248 default:
1249 dev_err(&pl08x->adev->dev,
1250 "bad runtime_config: alien address width\n");
1251 return;
1252 }
1253
1254 /*
1255 * Now decide on a maxburst:
1256 * If this channel will only request single transfers, set
1257 * this down to ONE element.
1258 */
1259 if (plchan->cd->single) {
1260 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
1261 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
1262 } else {
1263 while (i < ARRAY_SIZE(burst_sizes)) {
1264 if (burst_sizes[i].burstwords <= maxburst)
1265 break;
1266 i++;
1267 }
1268 cctl |= burst_sizes[i].reg;
1269 }
1270
1271 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1272 cctl &= ~PL080_CONTROL_PROT_MASK;
1273 cctl |= PL080_CONTROL_PROT_SYS;
1274
1275 /* Modify the default channel data to fit PrimeCell request */
1276 cd->cctl = cctl;
1277 cd->ccfg = ccfg;
1278
1279 dev_dbg(&pl08x->adev->dev,
1280 "configured channel %s (%s) for %s, data width %d, "
1281 "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
1282 dma_chan_name(chan), plchan->name,
1283 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
1284 addr_width,
1285 maxburst,
1286 cctl, ccfg);
1287}
1288
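/*
 * Editorial sketch, not part of the patch: how a client such as the
 * PL011 UART driver would hand its runtime parameters to the channel.
 * uart_dr_phys is a hypothetical physical address of the peripheral
 * data register; a maxburst of 16 words follows the half-FIFO rule of
 * thumb from the header comment, assuming a 32-deep TX FIFO.
 */
static __maybe_unused void slave_config_example(struct dma_chan *chan,
						dma_addr_t uart_dr_phys)
{
	struct dma_slave_config config = {
		.direction = DMA_TO_DEVICE,
		.dst_addr = uart_dr_phys,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst = 16,
	};

	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
				     (unsigned long) &config);
}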
1289/*
1290 * Slave transactions callback to the slave device to allow
1291 * synchronization of slave DMA signals with the DMAC enable
1292 */
1293static void pl08x_issue_pending(struct dma_chan *chan)
1294{
1295 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1296 struct pl08x_driver_data *pl08x = plchan->host;
1297 unsigned long flags;
1298
1299 spin_lock_irqsave(&plchan->lock, flags);
1300 /* Something is already active */
1301 if (plchan->at) {
1302 spin_unlock_irqrestore(&plchan->lock, flags);
1303 return;
1304 }
1305
1306 /* Didn't get a physical channel so waiting for it ... */
1307 if (plchan->state == PL08X_CHAN_WAITING)
1308 return;
1309
1310 /* Take the first element in the queue and execute it */
1311 if (!list_empty(&plchan->desc_list)) {
1312 struct pl08x_txd *next;
1313
1314 next = list_first_entry(&plchan->desc_list,
1315 struct pl08x_txd,
1316 node);
1317 list_del(&next->node);
1318 plchan->at = next;
1319 plchan->state = PL08X_CHAN_RUNNING;
1320
1321 /* Configure the physical channel for the active txd */
1322 pl08x_config_phychan_for_txd(plchan);
1323 pl08x_set_cregs(pl08x, plchan->phychan);
1324 pl08x_enable_phy_chan(pl08x, plchan->phychan);
1325 }
1326
1327 spin_unlock_irqrestore(&plchan->lock, flags);
1328}
1329
1330static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1331 struct pl08x_txd *txd)
1332{
1333 int num_llis;
1334 struct pl08x_driver_data *pl08x = plchan->host;
1335 int ret;
1336
1337 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1338
1339 if (!num_llis)
1340 return -EINVAL;
1341
1342 spin_lock_irqsave(&plchan->lock, plchan->lockflags);
1343
1344 /*
1345 * If this device is not using a circular buffer then
1346 * queue this new descriptor for transfer.
1347 * The descriptor for a circular buffer continues
1348 * to be used until the channel is freed.
1349 */
1350 if (txd->cd->circular_buffer)
1351 dev_err(&pl08x->adev->dev,
1352 "%s attempting to queue a circular buffer\n",
1353 __func__);
1354 else
1355 list_add_tail(&txd->node,
1356 &plchan->desc_list);
1357
1358 /*
1359 * See if we already have a physical channel allocated,
1360 * else this is the time to try to get one.
1361 */
1362 ret = prep_phy_channel(plchan, txd);
1363 if (ret) {
1364 /*
1365 * No physical channel available: memcpy transfers are
1366 * stacked up until a channel becomes available to handle
1367 * them. Slave transfers, on the other hand, may have been
1368 * denied due to platform channel muxing restrictions, and
1369 * since there is no guarantee that this will ever be
1370 * resolved, and since the signal must be acquired AFTER
1371 * acquiring the physical channel, we let them be NACKed
1372 * with -EBUSY here. The drivers can always retry the
1373 * prep() call if they are keen on doing this using DMA.
1374 */
1375 if (plchan->slave) {
1376 pl08x_free_txd_list(pl08x, plchan);
1377 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
1378 return -EBUSY;
1379 }
1380 /* Do this memcpy whenever there is a channel ready */
1381 plchan->state = PL08X_CHAN_WAITING;
1382 plchan->waiting = txd;
1383 } else
1384 /*
1385 * Else we're all set, paused and ready to roll,
1386 * status will switch to PL08X_CHAN_RUNNING when
1387 * we call issue_pending(). If there is something
1388 * running on the channel already we don't change
1389 * its state.
1390 */
1391 if (plchan->state == PL08X_CHAN_IDLE)
1392 plchan->state = PL08X_CHAN_PAUSED;
1393
1394 /*
1395 * Notice that we leave plchan->lock locked on purpose:
1396 * it will be unlocked in the subsequent tx_submit()
1397 * call. This is a consequence of the current API.
1398 */
1399
1400 return 0;
1401}
1402
1403/*
1404 * Initialize a descriptor to be used by memcpy submit
1405 */
1406static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1407 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1408 size_t len, unsigned long flags)
1409{
1410 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1411 struct pl08x_driver_data *pl08x = plchan->host;
1412 struct pl08x_txd *txd;
1413 int ret;
1414
1415 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1416 if (!txd) {
1417 dev_err(&pl08x->adev->dev,
1418 "%s no memory for descriptor\n", __func__);
1419 return NULL;
1420 }
1421
1422 dma_async_tx_descriptor_init(&txd->tx, chan);
1423 txd->direction = DMA_NONE;
1424 txd->srcbus.addr = src;
1425 txd->dstbus.addr = dest;
1426
1427 /* Set platform data for m2m */
1428 txd->cd = &pl08x->pd->memcpy_channel;
1429 /* Both to be incremented or the code will break */
1430 txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1431 txd->tx.tx_submit = pl08x_tx_submit;
1432 txd->tx.callback = NULL;
1433 txd->tx.callback_param = NULL;
1434 txd->len = len;
1435
1436 INIT_LIST_HEAD(&txd->node);
1437 ret = pl08x_prep_channel_resources(plchan, txd);
1438 if (ret)
1439 return NULL;
1440 /*
1441 * NB: the channel lock is held at this point so tx_submit()
1442 * must be called in direct succession.
1443 */
1444
1445 return &txd->tx;
1446}
1447
1448struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1449 struct dma_chan *chan, struct scatterlist *sgl,
1450 unsigned int sg_len, enum dma_data_direction direction,
1451 unsigned long flags)
1452{
1453 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1454 struct pl08x_driver_data *pl08x = plchan->host;
1455 struct pl08x_txd *txd;
1456 int ret;
1457
1458 /*
1459 * Current implementation ASSUMES only one sg
1460 */
1461 if (sg_len != 1) {
1462 dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
1463 __func__);
1464 BUG();
1465 }
1466
1467 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1468 __func__, sgl->length, plchan->name);
1469
1470 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1471 if (!txd) {
1472 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1473 return NULL;
1474 }
1475
1476 dma_async_tx_descriptor_init(&txd->tx, chan);
1477
1478 if (direction != plchan->runtime_direction)
1479 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1480 "the direction configured for the PrimeCell\n",
1481 __func__);
1482
1483 /*
1484 * Set up addresses, the PrimeCell configured address
1485 * will take precedence since this may configure the
1486 * channel target address dynamically at runtime.
1487 */
1488 txd->direction = direction;
1489 if (direction == DMA_TO_DEVICE) {
1490 txd->srcbus.addr = sgl->dma_address;
1491 if (plchan->runtime_addr)
1492 txd->dstbus.addr = plchan->runtime_addr;
1493 else
1494 txd->dstbus.addr = plchan->cd->addr;
1495 } else if (direction == DMA_FROM_DEVICE) {
1496 if (plchan->runtime_addr)
1497 txd->srcbus.addr = plchan->runtime_addr;
1498 else
1499 txd->srcbus.addr = plchan->cd->addr;
1500 txd->dstbus.addr = sgl->dma_address;
1501 } else {
1502 dev_err(&pl08x->adev->dev,
1503 "%s direction unsupported\n", __func__);
1504 return NULL;
1505 }
1506 txd->cd = plchan->cd;
1507 txd->tx.tx_submit = pl08x_tx_submit;
1508 txd->tx.callback = NULL;
1509 txd->tx.callback_param = NULL;
1510 txd->len = sgl->length;
1511 INIT_LIST_HEAD(&txd->node);
1512
1513 ret = pl08x_prep_channel_resources(plchan, txd);
1514 if (ret)
1515 return NULL;
1516 /*
1517 * NB: the channel lock is held at this point so tx_submit()
1518 * must be called in direct succession.
1519 */
1520
1521 return &txd->tx;
1522}
1523
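/*
 * Editorial sketch, not part of the patch: a client submitting one
 * buffer on a slave channel. The single-entry scatterlist reflects the
 * one-sg assumption above; buf is an already DMA-mapped bus address.
 */
static __maybe_unused dma_cookie_t slave_tx_example(struct dma_chan *chan,
						    dma_addr_t buf, size_t len)
{
	struct scatterlist sg;
	struct dma_async_tx_descriptor *tx;

	sg_init_table(&sg, 1);
	sg.length = len;	/* the driver reads sgl->length */
	sg.dma_address = buf;	/* and sgl->dma_address */

	tx = chan->device->device_prep_slave_sg(chan, &sg, 1,
						DMA_TO_DEVICE, 0);
	return tx ? tx->tx_submit(tx) : -EBUSY;
}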
1524static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1525 unsigned long arg)
1526{
1527 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1528 struct pl08x_driver_data *pl08x = plchan->host;
1529 unsigned long flags;
1530 int ret = 0;
1531
1532 /* Controls applicable to inactive channels */
1533 if (cmd == DMA_SLAVE_CONFIG) {
1534 dma_set_runtime_config(chan,
1535 (struct dma_slave_config *)
1536 arg);
1537 return 0;
1538 }
1539
1540 /*
1541 * Anything succeeds on channels with no physical allocation and
1542 * no queued transfers.
1543 */
1544 spin_lock_irqsave(&plchan->lock, flags);
1545 if (!plchan->phychan && !plchan->at) {
1546 spin_unlock_irqrestore(&plchan->lock, flags);
1547 return 0;
1548 }
1549
1550 switch (cmd) {
1551 case DMA_TERMINATE_ALL:
1552 plchan->state = PL08X_CHAN_IDLE;
1553
1554 if (plchan->phychan) {
1555 pl08x_stop_phy_chan(plchan->phychan);
1556
1557 /*
1558 * Mark physical channel as free and free any slave
1559 * signal
1560 */
1561 if ((plchan->phychan->signal >= 0) &&
1562 pl08x->pd->put_signal) {
1563 pl08x->pd->put_signal(plchan);
1564 plchan->phychan->signal = -1;
1565 }
1566 pl08x_put_phy_channel(pl08x, plchan->phychan);
1567 plchan->phychan = NULL;
1568 }
1569 /* Stop any pending tasklet */
1570 tasklet_disable(&plchan->tasklet);
1571 /* Dequeue jobs and free LLIs */
1572 if (plchan->at) {
1573 pl08x_free_txd(pl08x, plchan->at);
1574 plchan->at = NULL;
1575 }
1576 /* Dequeue jobs not yet fired as well */
1577 pl08x_free_txd_list(pl08x, plchan);
1578 break;
1579 case DMA_PAUSE:
1580 pl08x_pause_phy_chan(plchan->phychan);
1581 plchan->state = PL08X_CHAN_PAUSED;
1582 break;
1583 case DMA_RESUME:
1584 pl08x_resume_phy_chan(plchan->phychan);
1585 plchan->state = PL08X_CHAN_RUNNING;
1586 break;
1587 default:
1588 /* Unknown command */
1589 ret = -ENXIO;
1590 break;
1591 }
1592
1593 spin_unlock_irqrestore(&plchan->lock, flags);
1594
1595 return ret;
1596}
1597
1598bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1599{
1600 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1601 char *name = chan_id;
1602
1603 /* Check that the channel is not taken! */
1604 if (!strcmp(plchan->name, name))
1605 return true;
1606
1607 return false;
1608}
1609
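/*
 * Editorial sketch, not part of the patch: requesting a slave channel
 * by name with the filter above. "uart0_tx" is a hypothetical bus_id
 * from the platform's pl08x_channel_data table.
 */
static __maybe_unused struct dma_chan *request_channel_example(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
}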
1610/*
1611 * Just check that the device is there and active
1612 * TODO: turn this bit on/off depending on the number of
1613 * physical channels actually used, if it is zero... well
1614 * shut it off. That will save some power. Cut the clock
1615 * at the same time.
1616 */
1617static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1618{
1619 u32 val;
1620
1621 val = readl(pl08x->base + PL080_CONFIG);
1622 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1623 /* We implicitly clear bit 1 and that means little-endian mode */
1624 val |= PL080_CONFIG_ENABLE;
1625 writel(val, pl08x->base + PL080_CONFIG);
1626}
1627
1628static void pl08x_tasklet(unsigned long data)
1629{
1630 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1631 struct pl08x_phy_chan *phychan = plchan->phychan;
1632 struct pl08x_driver_data *pl08x = plchan->host;
1633
1634 if (!plchan)
1635 BUG();
1636
1637 spin_lock(&plchan->lock);
1638
1639 if (plchan->at) {
1640 dma_async_tx_callback callback =
1641 plchan->at->tx.callback;
1642 void *callback_param =
1643 plchan->at->tx.callback_param;
1644
1645 /*
1646 * Update last completed
1647 */
1648 plchan->lc =
1649 (plchan->at->tx.cookie);
1650
1651 /*
1652 * Callback to signal completion
1653 */
1654 if (callback)
1655 callback(callback_param);
1656
1657 /*
1658 * Device callbacks should NOT clear
1659 * the current transaction on the channel
1660 * Linus: sometimes they should?
1661 */
1662 if (!plchan->at)
1663 BUG();
1664
1665 /*
1666 * Free the descriptor if it's not for a device
1667 * using a circular buffer
1668 */
1669 if (!plchan->at->cd->circular_buffer) {
1670 pl08x_free_txd(pl08x, plchan->at);
1671 plchan->at = NULL;
1672 }
1673 /*
1674 * else descriptor for circular
1675 * buffers only freed when
1676 * client has disabled dma
1677 */
1678 }
1679 /*
1680 * If a new descriptor is queued, set it up
1681 * plchan->at is NULL here
1682 */
1683 if (!list_empty(&plchan->desc_list)) {
1684 struct pl08x_txd *next;
1685
1686 next = list_first_entry(&plchan->desc_list,
1687 struct pl08x_txd,
1688 node);
1689 list_del(&next->node);
1690 plchan->at = next;
1691 /* Configure the physical channel for the next txd */
1692 pl08x_config_phychan_for_txd(plchan);
1693 pl08x_set_cregs(pl08x, plchan->phychan);
1694 pl08x_enable_phy_chan(pl08x, plchan->phychan);
1695 } else {
1696 struct pl08x_dma_chan *waiting = NULL;
1697
1698 /*
1699 * No more jobs, so free up the physical channel
1700 * Free any allocated signal on slave transfers too
1701 */
1702 if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
1703 pl08x->pd->put_signal(plchan);
1704 phychan->signal = -1;
1705 }
1706 pl08x_put_phy_channel(pl08x, phychan);
1707 plchan->phychan = NULL;
1708 plchan->state = PL08X_CHAN_IDLE;
1709
1710 /*
1711 * And NOW before anyone else can grab that freed-
1712 * up physical channel, see if there is some memcpy
1713 * pending that seriously needs to start because of
1714 * being stacked up while we were choking the
1715 * physical channels with data.
1716 */
1717 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1718 chan.device_node) {
1719 if (waiting->state == PL08X_CHAN_WAITING &&
1720 waiting->waiting != NULL) {
1721 int ret;
1722
1723 /* This should REALLY not fail now */
1724 ret = prep_phy_channel(waiting,
1725 waiting->waiting);
1726 BUG_ON(ret);
1727 waiting->state = PL08X_CHAN_RUNNING;
1728 waiting->waiting = NULL;
1729 pl08x_issue_pending(&waiting->chan);
1730 break;
1731 }
1732 }
1733 }
1734
1735 spin_unlock(&plchan->lock);
1736}
1737
1738static irqreturn_t pl08x_irq(int irq, void *dev)
1739{
1740 struct pl08x_driver_data *pl08x = dev;
1741 u32 mask = 0;
1742 u32 val;
1743 int i;
1744
1745 val = readl(pl08x->base + PL080_ERR_STATUS);
1746 if (val) {
1747 /*
1748 * An error interrupt (on one or more channels)
1749 */
1750 dev_err(&pl08x->adev->dev,
1751 "%s error interrupt, register value 0x%08x\n",
1752 __func__, val);
1753 /*
1754 * Simply clear ALL PL08X error interrupts,
1755 * regardless of channel and cause
1756 * FIXME: should be 0x00000003 on PL081 really.
1757 */
1758 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1759 }
1760 val = readl(pl08x->base + PL080_INT_STATUS);
1761 for (i = 0; i < pl08x->vd->channels; i++) {
1762 if ((1 << i) & val) {
1763 /* Locate physical channel */
1764 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1765 struct pl08x_dma_chan *plchan = phychan->serving;
1766
1767 /* Schedule tasklet on this channel */
1768 tasklet_schedule(&plchan->tasklet);
1769
1770 mask |= (1 << i);
1771 }
1772 }
1773 /*
1774 * Clear only the terminal interrupts on channels we processed
1775 */
1776 writel(mask, pl08x->base + PL080_TC_CLEAR);
1777
1778 return mask ? IRQ_HANDLED : IRQ_NONE;
1779}
1780
1781/*
1782 * Initialise the DMAC memcpy/slave channels.
1783 * Make a local wrapper to hold required data
1784 */
1785static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1786 struct dma_device *dmadev,
1787 unsigned int channels,
1788 bool slave)
1789{
1790 struct pl08x_dma_chan *chan;
1791 int i;
1792
1793 INIT_LIST_HEAD(&dmadev->channels);
1794 /*
1795 * Register as many memcpy channels as we have physical channels;
1796 * we won't always be able to use them all but the code will have
1797 * to cope with that situation.
1798 */
1799 for (i = 0; i < channels; i++) {
1800 chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
1801 if (!chan) {
1802 dev_err(&pl08x->adev->dev,
1803 "%s no memory for channel\n", __func__);
1804 return -ENOMEM;
1805 }
1806
1807 chan->host = pl08x;
1808 chan->state = PL08X_CHAN_IDLE;
1809
1810 if (slave) {
1811 chan->slave = true;
1812 chan->name = pl08x->pd->slave_channels[i].bus_id;
1813 chan->cd = &pl08x->pd->slave_channels[i];
1814 } else {
1815 chan->cd = &pl08x->pd->memcpy_channel;
1816 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1817 if (!chan->name) {
1818 kfree(chan);
1819 return -ENOMEM;
1820 }
1821 }
1822 dev_info(&pl08x->adev->dev,
1823 "initialize virtual channel \"%s\"\n",
1824 chan->name);
1825
1826 chan->chan.device = dmadev;
1827 atomic_set(&chan->last_issued, 0);
1828 chan->lc = atomic_read(&chan->last_issued);
1829
1830 spin_lock_init(&chan->lock);
1831 INIT_LIST_HEAD(&chan->desc_list);
1832 tasklet_init(&chan->tasklet, pl08x_tasklet,
1833 (unsigned long) chan);
1834
1835 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1836 }
1837 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1838 i, slave ? "slave" : "memcpy");
1839 return i;
1840}
1841
1842static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1843{
1844 struct pl08x_dma_chan *chan = NULL;
1845 struct pl08x_dma_chan *next;
1846
1847 list_for_each_entry_safe(chan,
1848 next, &dmadev->channels, chan.device_node) {
1849 list_del(&chan->chan.device_node);
1850 kfree(chan);
1851 }
1852}
1853
1854#ifdef CONFIG_DEBUG_FS
1855static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1856{
1857 switch (state) {
1858 case PL08X_CHAN_IDLE:
1859 return "idle";
1860 case PL08X_CHAN_RUNNING:
1861 return "running";
1862 case PL08X_CHAN_PAUSED:
1863 return "paused";
1864 case PL08X_CHAN_WAITING:
1865 return "waiting";
1866 default:
1867 break;
1868 }
1869 return "UNKNOWN STATE";
1870}
1871
1872static int pl08x_debugfs_show(struct seq_file *s, void *data)
1873{
1874 struct pl08x_driver_data *pl08x = s->private;
1875 struct pl08x_dma_chan *chan;
1876 struct pl08x_phy_chan *ch;
1877 unsigned long flags;
1878 int i;
1879
1880 seq_printf(s, "PL08x physical channels:\n");
1881 seq_printf(s, "CHANNEL:\tUSER:\n");
1882 seq_printf(s, "--------\t-----\n");
1883 for (i = 0; i < pl08x->vd->channels; i++) {
1884 struct pl08x_dma_chan *virt_chan;
1885
1886 ch = &pl08x->phy_chans[i];
1887
1888 spin_lock_irqsave(&ch->lock, flags);
1889 virt_chan = ch->serving;
1890
1891 seq_printf(s, "%d\t\t%s\n",
1892 ch->id, virt_chan ? virt_chan->name : "(none)");
1893
1894 spin_unlock_irqrestore(&ch->lock, flags);
1895 }
1896
1897 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1898 seq_printf(s, "CHANNEL:\tSTATE:\n");
1899 seq_printf(s, "--------\t------\n");
1900 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1901 seq_printf(s, "%s\t\t\%s\n", chan->name,
1902 pl08x_state_str(chan->state));
1903 }
1904
1905 seq_printf(s, "\nPL08x virtual slave channels:\n");
1906 seq_printf(s, "CHANNEL:\tSTATE:\n");
1907 seq_printf(s, "--------\t------\n");
1908 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1909		seq_printf(s, "%s\t\t%s\n", chan->name,
1910 pl08x_state_str(chan->state));
1911 }
1912
1913 return 0;
1914}
1915
1916static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1917{
1918 return single_open(file, pl08x_debugfs_show, inode->i_private);
1919}
1920
1921static const struct file_operations pl08x_debugfs_operations = {
1922 .open = pl08x_debugfs_open,
1923 .read = seq_read,
1924 .llseek = seq_lseek,
1925 .release = single_release,
1926};
1927
1928static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1929{
1930	/* Expose a simple debugfs interface to view channel status */
1931 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
1932 NULL, pl08x,
1933 &pl08x_debugfs_operations);
1934}
1935
1936#else
1937static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1938{
1939}
1940#endif
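/*
 * Reading the debugfs file created in init_pl08x_debugfs() above (named
 * after the AMBA device, in the debugfs root) yields output shaped by
 * the format strings in pl08x_debugfs_show(); a hypothetical example,
 * with invented channel names and states:
 *
 *	PL08x physical channels:
 *	CHANNEL:	USER:
 *	--------	-----
 *	0		memcpy0
 *	1		(none)
 *
 *	PL08x virtual memcpy channels:
 *	CHANNEL:	STATE:
 *	--------	------
 *	memcpy0		running
 *	memcpy1		idle
 */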
1941
1942static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1943{
1944 struct pl08x_driver_data *pl08x;
1945 struct vendor_data *vd = id->data;
1946 int ret = 0;
1947 int i;
1948
1949 ret = amba_request_regions(adev, NULL);
1950 if (ret)
1951 return ret;
1952
1953 /* Create the driver state holder */
1954 pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
1955 if (!pl08x) {
1956 ret = -ENOMEM;
1957 goto out_no_pl08x;
1958 }
1959
1960 /* Initialize memcpy engine */
1961 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1962 pl08x->memcpy.dev = &adev->dev;
1963 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1964 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1965 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1966 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1967 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1968 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1969 pl08x->memcpy.device_control = pl08x_control;
1970
1971 /* Initialize slave engine */
1972 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1973 pl08x->slave.dev = &adev->dev;
1974 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1975 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1976 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1977 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1978 pl08x->slave.device_issue_pending = pl08x_issue_pending;
1979 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
1980 pl08x->slave.device_control = pl08x_control;
1981
1982 /* Get the platform data */
1983 pl08x->pd = dev_get_platdata(&adev->dev);
1984	if (!pl08x->pd) {
1985		dev_err(&adev->dev, "no platform data supplied\n");
1986		ret = -EINVAL;
1987		goto out_no_platdata;
1988	}
1988
1989 /* Assign useful pointers to the driver state */
1990 pl08x->adev = adev;
1991 pl08x->vd = vd;
1992
1993	/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
1994 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1995 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
1996 if (!pl08x->pool) {
1997 ret = -ENOMEM;
1998 goto out_no_lli_pool;
1999 }
2000
2001 spin_lock_init(&pl08x->lock);
2002
2003 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2004 if (!pl08x->base) {
2005 ret = -ENOMEM;
2006 goto out_no_ioremap;
2007 }
2008
2009 /* Turn on the PL08x */
2010 pl08x_ensure_on(pl08x);
2011
2012	/*
2013	 * Clear any pending interrupts, then attach the interrupt handler
2014	 */
2015 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2016 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2017
2018 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
2019 vd->name, pl08x);
2020 if (ret) {
2021 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2022 __func__, adev->irq[0]);
2023 goto out_no_irq;
2024 }
2025
2026 /* Initialize physical channels */
2027 pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
2028 GFP_KERNEL);
2029	if (!pl08x->phy_chans) {
2030		dev_err(&adev->dev, "%s failed to allocate "
2031			"physical channel holders\n", __func__);
2032		ret = -ENOMEM;
2033		goto out_no_phychans;
2034	}
2035
2036 for (i = 0; i < vd->channels; i++) {
2037 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2038
2039 ch->id = i;
2040 ch->base = pl08x->base + PL080_Cx_BASE(i);
2041 spin_lock_init(&ch->lock);
2042 ch->serving = NULL;
2043 ch->signal = -1;
2044 dev_info(&adev->dev,
2045 "physical channel %d is %s\n", i,
2046 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2047 }
2048
2049 /* Register as many memcpy channels as there are physical channels */
2050 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2051 pl08x->vd->channels, false);
2052 if (ret <= 0) {
2053 dev_warn(&pl08x->adev->dev,
2054 "%s failed to enumerate memcpy channels - %d\n",
2055 __func__, ret);
2056 goto out_no_memcpy;
2057 }
2058 pl08x->memcpy.chancnt = ret;
2059
2060 /* Register slave channels */
2061 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2062 pl08x->pd->num_slave_channels,
2063 true);
2064 if (ret <= 0) {
2065 dev_warn(&pl08x->adev->dev,
2066 "%s failed to enumerate slave channels - %d\n",
2067 __func__, ret);
2068 goto out_no_slave;
2069 }
2070 pl08x->slave.chancnt = ret;
2071
2072 ret = dma_async_device_register(&pl08x->memcpy);
2073 if (ret) {
2074 dev_warn(&pl08x->adev->dev,
2075 "%s failed to register memcpy as an async device - %d\n",
2076 __func__, ret);
2077 goto out_no_memcpy_reg;
2078 }
2079
2080 ret = dma_async_device_register(&pl08x->slave);
2081 if (ret) {
2082 dev_warn(&pl08x->adev->dev,
2083 "%s failed to register slave as an async device - %d\n",
2084 __func__, ret);
2085 goto out_no_slave_reg;
2086 }
2087
2088 amba_set_drvdata(adev, pl08x);
2089 init_pl08x_debugfs(pl08x);
2090 dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
2091 vd->name, adev->res.start);
2092 return 0;
2093
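	/*
	 * The labels below unwind the setup steps in reverse order; each
	 * out_no_X target assumes X itself was never acquired, so it
	 * releases only what was set up before the failure point.
	 */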
2094out_no_slave_reg:
2095 dma_async_device_unregister(&pl08x->memcpy);
2096out_no_memcpy_reg:
2097 pl08x_free_virtual_channels(&pl08x->slave);
2098out_no_slave:
2099 pl08x_free_virtual_channels(&pl08x->memcpy);
2100out_no_memcpy:
2101 kfree(pl08x->phy_chans);
2102out_no_phychans:
2103 free_irq(adev->irq[0], pl08x);
2104out_no_irq:
2105 iounmap(pl08x->base);
2106out_no_ioremap:
2107 dma_pool_destroy(pl08x->pool);
2108out_no_lli_pool:
2109out_no_platdata:
2110 kfree(pl08x);
2111out_no_pl08x:
2112 amba_release_regions(adev);
2113 return ret;
2114}
2115
2116/* The PL080 has 8 channels and the PL081 has just 2 */
2117static struct vendor_data vendor_pl080 = {
2118 .name = "PL080",
2119 .channels = 8,
2120 .dualmaster = true,
2121};
2122
2123static struct vendor_data vendor_pl081 = {
2124 .name = "PL081",
2125 .channels = 2,
2126 .dualmaster = false,
2127};
2128
2129static struct amba_id pl08x_ids[] = {
2130 /* PL080 */
2131 {
2132 .id = 0x00041080,
2133 .mask = 0x000fffff,
2134 .data = &vendor_pl080,
2135 },
2136 /* PL081 */
2137 {
2138 .id = 0x00041081,
2139 .mask = 0x000fffff,
2140 .data = &vendor_pl081,
2141 },
2142 /* Nomadik 8815 PL080 variant */
2143 {
2144 .id = 0x00280880,
2145 .mask = 0x00ffffff,
2146 .data = &vendor_pl080,
2147 },
2148 { 0, 0 },
2149};
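/*
 * The AMBA bus core binds this driver by comparing (periphid & mask)
 * against .id above. In the PL080 entry, bits [11:0] of the PrimeCell
 * peripheral ID carry the part number (0x080) and bits [19:12] the
 * designer ID (0x41, ARM), giving 0x00041080; the 0x000fffff mask
 * ignores the revision field in bits [23:20], so any revision matches.
 */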
2150
2151static struct amba_driver pl08x_amba_driver = {
2152 .drv.name = DRIVER_NAME,
2153 .id_table = pl08x_ids,
2154 .probe = pl08x_probe,
2155};
2156
2157static int __init pl08x_init(void)
2158{
2159 int retval;
2160 retval = amba_driver_register(&pl08x_amba_driver);
2161 if (retval)
2162		printk(KERN_WARNING DRIVER_NAME
2163		       ": failed to register as an amba device (%d)\n",
2164		       retval);
2165 return retval;
2166}
2167subsys_initcall(pl08x_init);
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
new file mode 100644
index 000000000000..521a0f8974ac
--- /dev/null
+++ b/include/linux/amba/pl08x.h
@@ -0,0 +1,222 @@
1/*
2 * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
3 *
4 * Copyright (C) 2005 ARM Ltd
5 * Copyright (C) 2010 ST-Ericsson SA
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * pl08x information required by platform code
12 *
13 * Please credit ARM.com
14 * Documentation: ARM DDI 0196D
15 *
16 */
17
18#ifndef AMBA_PL08X_H
19#define AMBA_PL08X_H
20
21/* We need sizes of structs from these headers */
22#include <linux/dmaengine.h>
23#include <linux/interrupt.h>
24
25/**
26 * struct pl08x_channel_data - data structure to pass info between
27 * platform and PL08x driver regarding channel configuration
28 * @bus_id: name of this device channel, not just a device name since
29 * devices may have more than one channel e.g. "foo_tx"
30 * @min_signal: the minimum DMA signal number to be muxed in for this
31 * channel (for platforms supporting muxed signals). If you have
32 * static assignments, make sure this is set to the assigned signal
33 * number. The PL08x has 16 possible signals, numbered 0 thru 15, so
34 * when these are not enough they often get muxed (in hardware),
35 * preventing simultaneous use of the same signal by two devices.
36 * @max_signal: the maximum DMA signal number to be muxed in for
37 * the channel. Set to the same as min_signal for
38 * devices with static assignments
39 * @muxval: a number usually poked into some mux register to mux in
40 * the signal to this channel
41 * @cctl: default options for the channel control register
42 * @ccfg: default options for the channel configuration register
42 * @addr: source/target address in physical memory for this DMA channel,
43 * can be the address of a FIFO register for burst requests for example.
44 * This can be left undefined if the PrimeCell API is used for configuring
45 * this.
46 * @circular_buffer: whether the buffer passed in is circular and
47 * shall simply be looped over continuously
49 * @single: the device connected to this channel will request single
50 * DMA transfers, not bursts. (Bursts are default.)
51 */
52struct pl08x_channel_data {
53 char *bus_id;
54 int min_signal;
55 int max_signal;
56 u32 muxval;
57 u32 cctl;
58 u32 ccfg;
59 dma_addr_t addr;
60 bool circular_buffer;
61 bool single;
62};
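/*
 * A sketch (not part of this patch) of how a platform might populate
 * one slave channel entry; the bus_id, signal number and FIFO address
 * below are invented for illustration and are board-specific:
 */
static struct pl08x_channel_data example_slave_channels[] = {
	{
		.bus_id = "uart0_tx",	/* hypothetical channel name */
		.min_signal = 4,	/* statically assigned signal */
		.max_signal = 4,	/* same as min_signal: no muxing */
		.muxval = 0,		/* no mux register to poke */
		.addr = 0x10009000,	/* hypothetical UART TX FIFO */
		.single = false,	/* the device requests bursts */
	},
};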
63
64/**
65 * struct pl08x_bus_data - information about the source or
66 * destination busses for a transfer
67 * @addr: current address
68 * @maxwidth: the maximum width of a transfer on this bus
69 * @buswidth: the width of this bus in bytes: 1, 2 or 4
70 * @fill_bytes: bytes required to fill to the next bus memory
71 * boundary
72 */
73struct pl08x_bus_data {
74 dma_addr_t addr;
75 u8 maxwidth;
76 u8 buswidth;
77 u32 fill_bytes;
78};
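/*
 * A worked example of @fill_bytes, assuming the boundary in question
 * is the bus width: for addr = 0x1006 on a bus with buswidth = 4, the
 * next 4-byte boundary is 0x1008, so fill_bytes = 2 (two bytes of
 * narrow transfers before full-width transfers can start).
 */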
79
80/**
81 * struct pl08x_phy_chan - holder for the physical channels
82 * @id: physical index to this channel
83 * @base: ioremapped register base address for this channel
84 * @lock: a lock to use when altering an instance of this struct
85 * @signal: the physical DMA request signal serving this channel now
86 * @serving: the virtual channel currently being served by this
87 * physical channel
88 */
89struct pl08x_phy_chan {
90 unsigned int id;
91 void __iomem *base;
92 spinlock_t lock;
93 int signal;
94 struct pl08x_dma_chan *serving;
95 u32 csrc;
96 u32 cdst;
97 u32 clli;
98 u32 cctl;
99 u32 ccfg;
100};
101
102/**
103 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
104 * @llis_bus: DMA memory address (physical) start for the LLIs
105 * @llis_va: virtual memory address start for the LLIs
106 */
107struct pl08x_txd {
108 struct dma_async_tx_descriptor tx;
109 struct list_head node;
110 enum dma_data_direction direction;
111 struct pl08x_bus_data srcbus;
112 struct pl08x_bus_data dstbus;
113 int len;
114 dma_addr_t llis_bus;
115 void *llis_va;
116 struct pl08x_channel_data *cd;
117 bool active;
118 /*
119 * Settings to be put into the physical channel when we
120 * trigger this txd
121 */
122 u32 csrc;
123 u32 cdst;
124 u32 clli;
125 u32 cctl;
126};
127
128/**
129 * enum pl08x_dma_chan_state - holds the PL08x specific virtual
130 * channel states
131 * @PL08X_CHAN_IDLE: the channel is idle
132 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
133 * channel and is running a transfer on it
134 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
135 * channel, but the transfer is currently paused
136 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
137 * channel to become available (only pertains to memcpy channels)
138 */
139enum pl08x_dma_chan_state {
140 PL08X_CHAN_IDLE,
141 PL08X_CHAN_RUNNING,
142 PL08X_CHAN_PAUSED,
143 PL08X_CHAN_WAITING,
144};
145
146/**
147 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
148 * @chan: wrapped abstract channel
149 * @phychan: the physical channel utilized by this channel, if there is one
150 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
151 * @name: name of channel
152 * @cd: channel platform data
153 * @runtime_addr: address for RX/TX according to the runtime config
154 * @runtime_direction: current direction of this channel according to
155 * runtime config
156 * @last_issued: cookie of the most recently submitted transaction
157 * @lc: last completed transaction on this channel
157 * @desc_list: queued transactions pending on this channel
158 * @at: active transaction on this channel
159 * @lockflags: IRQ flags stored here when a lock is held across two
160 * function calls, especially across prep/submit
162 * @lock: a lock for this channel data
163 * @host: a pointer to the host (internal use)
164 * @state: whether the channel is idle, paused, running etc
165 * @slave: whether this channel is a device (slave) or for memcpy
166 * @waiting: a TX descriptor on this channel which is waiting for
167 * a physical channel to become available
168 */
169struct pl08x_dma_chan {
170 struct dma_chan chan;
171 struct pl08x_phy_chan *phychan;
172 struct tasklet_struct tasklet;
173 char *name;
174 struct pl08x_channel_data *cd;
175 dma_addr_t runtime_addr;
176 enum dma_data_direction runtime_direction;
177 atomic_t last_issued;
178 dma_cookie_t lc;
179 struct list_head desc_list;
180 struct pl08x_txd *at;
181 unsigned long lockflags;
182 spinlock_t lock;
183 void *host;
184 enum pl08x_dma_chan_state state;
185 bool slave;
186 struct pl08x_txd *waiting;
187};
188
189/**
190 * struct pl08x_platform_data - the platform configuration for the
191 * PL08x PrimeCells.
192 * @slave_channels: the channels defined for the different devices on the
193 * platform, all inclusive, including multiplexed channels. The available
194 * physical channels will be multiplexed around these signals as they
195 * are requested, just enumerate all possible channels.
196 * @num_slave_channels: number of entries in @slave_channels
197 * @memcpy_channel: channel data template used for the memcpy channels
198 * @get_signal: request a physical signal for a DMA transfer immediately;
199 * if multiplexing or similar blocks use of the channel, the transfer
200 * can be denied by returning less than zero, else the allocated signal
201 * number is returned
200 * @put_signal: indicate to the platform that this physical signal is not
201 * running any DMA transfer and multiplexing can be recycled
202 *
203 * Bit[0] of each LLI address indicates which AHB bus master fetches
204 * the LLIs: 0 selects AHB master 1 and 1 selects AHB master 2.
204 */
205struct pl08x_platform_data {
206 struct pl08x_channel_data *slave_channels;
207 unsigned int num_slave_channels;
208 struct pl08x_channel_data memcpy_channel;
209 int (*get_signal)(struct pl08x_dma_chan *);
210 void (*put_signal)(struct pl08x_dma_chan *);
211};
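/*
 * A sketch (not part of this patch) of how a board file might wire up
 * this platform data, reusing the hypothetical example_slave_channels[]
 * table sketched above; with statically assigned signals the two
 * callbacks stay trivial. ARRAY_SIZE() comes from <linux/kernel.h>.
 */
static int example_get_signal(struct pl08x_dma_chan *ch)
{
	/* no muxing on this hypothetical board: the assignment is fixed */
	return ch->cd->min_signal;
}

static void example_put_signal(struct pl08x_dma_chan *ch)
{
	/* nothing to recycle for statically assigned signals */
}

static struct pl08x_platform_data example_pl08x_pd = {
	.slave_channels = example_slave_channels,
	.num_slave_channels = ARRAY_SIZE(example_slave_channels),
	.memcpy_channel = {
		.bus_id = "memcpy",
		/* cctl/ccfg left at zero here; tune per platform */
	},
	.get_signal = example_get_signal,
	.put_signal = example_put_signal,
};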
212
213#ifdef CONFIG_AMBA_PL08X
214bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
215#else
216static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
217{
218 return false;
219}
220#endif
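/*
 * A sketch (not part of this patch) of how a peripheral driver would
 * pair pl08x_filter_id() with dma_request_channel() to claim a channel
 * by bus_id; "uart0_tx" is the hypothetical name sketched above:
 */
static inline struct dma_chan *example_request_uart_tx(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
}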
221
222#endif /* AMBA_PL08X_H */