Diffstat (limited to 'drivers/dma/amba-pl08x.c')
-rw-r--r--	drivers/dma/amba-pl08x.c	| 1168
1 file changed, 533 insertions(+), 635 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..297f48b0cba9 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * The full GNU General Public License is in this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
  * Documentation: ARM DDI 0218E == PL081
  *
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
  *
  * The PL080 has 8 channels available for simultaneous use, and the PL081
  * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
  *
  * ASSUMES default (little) endianness for DMA transfers
  *
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ *  - DMAC flow control: the transfer size defines the number of transfers
+ *    which occur for the current LLI entry, and the DMAC raises TC at the
+ *    end of every LLI entry. Observed behaviour shows the DMAC listening
+ *    to both the BREQ and SREQ signals (contrary to documented),
+ *    transferring data if either is active. The LBREQ and LSREQ signals
+ *    are ignored.
+ *
+ *  - Peripheral flow control: the transfer size is ignored (and should be
+ *    zero). The data is transferred from the current LLI entry, until
+ *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
+ *    will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method. However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
+ * these hardware restrictions prevents them from using scatter DMA.
  *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
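Note: the flow-control method described in the comment above is selected through the channel's CCFG register. A rough illustration only, not part of this patch (the PL080_FLOW_* value and shift are used elsewhere in this patch; the mask name is assumed from asm/hardware/pl080.h):

	/* DMAC as flow controller for a memory-to-peripheral transfer */
	ccfg &= ~PL080_CONFIG_FLOW_CONTROL_MASK;
	ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;

With the DMAC as flow controller, the transfer size programmed in each LLI's CCTL field is honoured and TC is raised per LLI, which is what makes scatter lists workable.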
@@ -61,50 +77,39 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/dmapool.h>
-#include <linux/amba/bus.h>
 #include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
 #include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
 /**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
  */
 struct vendor_data {
-	char *name;
 	u8 channels;
 	bool dualmaster;
 };
 
 /*
  * PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl. Also note that these
+ * are fixed 32-bit quantities.
  */
-struct lli {
-	dma_addr_t src;
-	dma_addr_t dst;
-	dma_addr_t next;
+struct pl08x_lli {
+	u32 src;
+	u32 dst;
+	u32 lli;
 	u32 cctl;
 };
 
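Note: to make the LLI chaining concrete, each pl08x_lli is a hardware-readable record, and the lli member of entry N holds the bus address of entry N+1 (with bit 0 optionally set to PL080_LLI_LM_AHB2 to fetch LLIs over AHB2). A hand-built two-entry chain would look roughly like this sketch (addresses invented for the example):

	struct pl08x_lli *llis_va;	/* CPU view, from dma_pool_alloc() */
	dma_addr_t llis_bus;		/* bus address of the same memory */

	llis_va[0].src  = 0x20000000;	/* example source address */
	llis_va[0].dst  = 0x10000080;	/* example destination address */
	llis_va[0].cctl = cctl;
	llis_va[0].lli  = llis_bus + sizeof(struct pl08x_lli);
	llis_va[1].src  = 0x20000400;
	llis_va[1].dst  = 0x10000080;
	llis_va[1].cctl = cctl | PL080_CONTROL_TC_IRQ_EN; /* IRQ on final LLI */
	llis_va[1].lli  = 0;		/* zero terminates the chain */

This is the shape that pl08x_fill_lli_for_desc() below builds up.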
@@ -119,6 +124,8 @@ struct lli {
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
 struct pl08x_driver_data {
@@ -126,11 +133,13 @@ struct pl08x_driver_data {
 	struct dma_device memcpy;
 	void __iomem *base;
 	struct amba_device *adev;
-	struct vendor_data *vd;
+	const struct vendor_data *vd;
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
 	int pool_ctr;
+	u8 lli_buses;
+	u8 mem_buses;
 	spinlock_t lock;
 };
 
@@ -152,9 +161,9 @@ struct pl08x_driver_data {
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
 #define PL08X_MAX_ALLOCS	0x40
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
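Note: with the values above, sizeof(struct pl08x_lli) is four u32s, i.e. 16 bytes, so MAX_NUM_TSFR_LLIS works out to 0x2000 / 16 = 512 LLI entries per transfer buffer.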
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +171,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -177,88 +191,47 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 
 /*
  * Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed. Poke them into the hardware
+ * and start the transfer.
  */
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
-			    struct pl08x_phy_chan *ch)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+			    struct pl08x_txd *txd)
-{
-	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=%08x, cdst=%08x, "
-		"cctl=%08x, clli=%08x, ccfg=%08x\n",
-		ch->id,
-		ch->csrc,
-		ch->cdst,
-		ch->cctl,
-		ch->clli,
-		ch->ccfg);
-
-	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
-	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
-	writel(ch->clli, ch->base + PL080_CH_LLI);
-	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
-	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
 {
-	struct pl08x_channel_data *cd = plchan->cd;
+	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_txd *txd = plchan->at;
+	struct pl08x_lli *lli = &txd->llis_va[0];
-
-	/* Copy the basic control register calculated at transfer config */
-	phychan->csrc = txd->csrc;
-	phychan->cdst = txd->cdst;
-	phychan->clli = txd->clli;
-	phychan->cctl = txd->cctl;
-
-	/* Assign the signal to the proper control registers */
-	phychan->ccfg = cd->ccfg;
-	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
-	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
-	/* If it wasn't set from AMBA, ignore it */
-	if (txd->direction == DMA_TO_DEVICE)
-		/* Select signal as destination */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
-	else if (txd->direction == DMA_FROM_DEVICE)
-		/* Select signal as source */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
-	/* Always enable error interrupts */
-	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
-	/* Always enable terminal interrupts */
-	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
-				  struct pl08x_phy_chan *ch)
-{
 	u32 val;
 
-	/*
-	 * Do not access config register until channel shows as disabled
-	 */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
-		;
+	plchan->at = txd;
 
-	/*
-	 * Do not access config register until channel shows as inactive
-	 */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	/* Wait for channel inactive */
+	while (pl08x_phy_channel_busy(phychan))
+		cpu_relax();
+
+	dev_vdbg(&pl08x->adev->dev,
+		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+		txd->ccfg);
+
+	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+	writel(lli->lli, phychan->base + PL080_CH_LLI);
+	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+	/* Enable the DMA channel */
+	/* Do not access config register until channel shows as disabled */
+	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+		cpu_relax();
+
+	/* Do not access config register until channel shows as inactive */
+	val = readl(phychan->base + PL080_CH_CONFIG);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(ch->base + PL080_CH_CONFIG);
+		val = readl(phychan->base + PL080_CH_CONFIG);
 
-	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
 }
 
 /*
@@ -266,10 +239,8 @@ static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
  *
  * Disabling individual channels could lose data.
  *
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
- *
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
@@ -282,7 +253,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(ch))
-		;
+		cpu_relax();
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -333,54 +304,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
-	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *txd;
 	unsigned long flags;
-	u32 bytes = 0;
+	size_t bytes = 0;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
 	ch = plchan->phychan;
 	txd = plchan->at;
 
 	/*
-	 * Next follow the LLIs to get the number of pending bytes in the
-	 * currently active transaction.
+	 * Follow the LLIs to get the number of remaining
+	 * bytes in the currently active transaction.
 	 */
 	if (ch && txd) {
-		struct lli *llis_va = txd->llis_va;
-		struct lli *llis_bus = (struct lli *) txd->llis_bus;
-		u32 clli = readl(ch->base + PL080_CH_LLI);
+		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the bytes in the current active LLI */
+		/* First get the remaining bytes in the active transfer */
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
 		if (clli) {
-			int i = 0;
+			struct pl08x_lli *llis_va = txd->llis_va;
+			dma_addr_t llis_bus = txd->llis_bus;
+			int index;
+
+			BUG_ON(clli < llis_bus || clli >= llis_bus +
+				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+
+			/*
+			 * Locate the next LLI - as this is an array,
+			 * it's simple maths to find.
+			 */
+			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
 
-			/* Forward to the LLI pointed to by clli */
-			while ((clli != (u32) &(llis_bus[i])) &&
-					(i < MAX_NUM_TSFR_LLIS))
-				i++;
+			for (; index < MAX_NUM_TSFR_LLIS; index++) {
+				bytes += get_bytes_in_cctl(llis_va[index].cctl);
 
-			while (clli) {
-				bytes += get_bytes_in_cctl(llis_va[i].cctl);
 				/*
-				 * A clli of 0x00000000 will terminate the
-				 * LLI list
+				 * A LLI pointer of 0 terminates the LLI list
 				 */
-				clli = llis_va[i].next;
-				i++;
+				if (!llis_va[index].lli)
+					break;
 			}
 		}
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->desc_list)) {
-		list_for_each_entry(txdi, &plchan->desc_list, node) {
+	if (!list_empty(&plchan->pend_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->pend_list, node) {
 			bytes += txdi->len;
 		}
-
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
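Note: a worked example of the index arithmetic introduced above: if the LLI array sits at bus address llis_bus == 0x40000000 (an invented value) and the channel's LLI register reads clli == 0x40000040, then index = (0x40000040 - 0x40000000) / sizeof(struct pl08x_lli) = 0x40 / 16 = 4, so the byte count resumes at llis_va[4], the entry the DMAC will fetch next; the old code found the same entry by linear search.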
@@ -390,6 +363,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 
 /*
  * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
  */
 static struct pl08x_phy_chan *
 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +376,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 	unsigned long flags;
 	int i;
 
-	/*
-	 * Try to locate a physical channel to be used for
-	 * this transfer. If all are taken return NULL and
-	 * the requester will have to cope by using some fallback
-	 * PIO mode or retrying later.
-	 */
 	for (i = 0; i < pl08x->vd->channels; i++) {
 		ch = &pl08x->phy_chans[i];
 
@@ -465,11 +436,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
 }
 
 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
-				  u32 tsize)
+				  size_t tsize)
 {
 	u32 retbits = cctl;
 
-	/* Remove all src, dst and transfersize bits */
+	/* Remove all src, dst and transfer size bits */
 	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +480,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
 	return retbits;
 }
 
+struct pl08x_lli_build_data {
+	struct pl08x_txd *txd;
+	struct pl08x_driver_data *pl08x;
+	struct pl08x_bus_data srcbus;
+	struct pl08x_bus_data dstbus;
+	size_t remainder;
+};
+
 /*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer this prefers the
+ * destination bus if both available if fixed address on one bus the
+ * other will be chosen
  */
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-	struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = src_bus;
-		*sbus = dst_bus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = dst_bus;
-		*sbus = src_bus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else {
-		if (dst_bus->buswidth == 4) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 4) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
-		} else if (dst_bus->buswidth == 2) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 2) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
+		if (bd->dstbus.buswidth == 4) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 4) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
+		} else if (bd->dstbus.buswidth == 2) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 2) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		} else {
-			/* src_bus->buswidth == 1 */
-			*mbus = dst_bus;
-			*sbus = src_bus;
+			/* bd->srcbus.buswidth == 1 */
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
 		}
 	}
 }
 
 /*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-	struct pl08x_txd *txd, int num_llis, int len,
-	u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl)
 {
-	struct lli *llis_va = txd->llis_va;
-	struct lli *llis_bus = (struct lli *) txd->llis_bus;
+	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
 	llis_va[num_llis].cctl = cctl;
-	llis_va[num_llis].src = txd->srcbus.addr;
-	llis_va[num_llis].dst = txd->dstbus.addr;
-
-	/*
-	 * On versions with dual masters, you can optionally AND on
-	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
-	 * in new LLIs with that controller, but we always try to
-	 * choose AHB1 to point into memory. The idea is to have AHB2
-	 * fixed on the peripheral and AHB1 messing around in the
-	 * memory. So we don't manipulate this bit currently.
-	 */
-
-	llis_va[num_llis].next =
-		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+	llis_va[num_llis].src = bd->srcbus.addr;
+	llis_va[num_llis].dst = bd->dstbus.addr;
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	if (bd->pl08x->lli_buses & PL08X_AHB2)
+		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
-		txd->srcbus.addr += len;
+		bd->srcbus.addr += len;
 	if (cctl & PL080_CONTROL_DST_INCR)
-		txd->dstbus.addr += len;
+		bd->dstbus.addr += len;
 
-	*remainder -= len;
+	BUG_ON(bd->remainder < len);
 
-	return num_llis + 1;
+	bd->remainder -= len;
 }
 
 /*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
 */
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
 {
-	u32 boundary;
-
-	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
-			<< PL08X_BOUNDARY_SHIFT;
+	size_t boundary_len = PL08X_BOUNDARY_SIZE -
+		(addr & (PL08X_BOUNDARY_SIZE - 1));
 
-	if (boundary < addr + len)
-		return boundary - addr;
-	else
-		return len;
+	return min(boundary_len, len);
 }
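Note: a worked example of the rewritten pl08x_pre_boundary(), assuming the driver's existing 1 KiB boundary (PL08X_BOUNDARY_SIZE == 0x400): for addr == 0x12345F80, addr & (PL08X_BOUNDARY_SIZE - 1) == 0x380, so boundary_len == 0x400 - 0x380 == 0x80 bytes; a request of len == 0x200 is clipped to 0x80, while len == 0x40 is returned unchanged. Unlike the old shift-and-compare version, nothing here computes addr + len, so the result is also sane for addresses near the top of the 32-bit space.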
 
 /*
@@ -608,20 +571,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 				    struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = txd->cd;
 	struct pl08x_bus_data *mbus, *sbus;
-	u32 remainder;
+	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl;
-	int max_bytes_per_lli;
-	int total_bytes = 0;
-	struct lli *llis_va;
-	struct lli *llis_bus;
-
-	if (!txd) {
-		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
-		return 0;
-	}
+	size_t max_bytes_per_lli;
+	size_t total_bytes = 0;
+	struct pl08x_lli *llis_va;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
 				      &txd->llis_bus);
@@ -632,121 +588,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 	pl08x->pool_ctr++;
 
-	/*
-	 * Initialize bus values for this transfer
-	 * from the passed optimal values
-	 */
-	if (!cd) {
-		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
-		return 0;
-	}
+	/* Get the default CCTL */
+	cctl = txd->cctl;
 
-	/* Get the default CCTL from the platform data */
-	cctl = cd->cctl;
-
-	/*
-	 * On the PL080 we have two bus masters and we
-	 * should select one for source and one for
-	 * destination. We try to use AHB2 for the
-	 * bus which does not increment (typically the
-	 * peripheral) else we just choose something.
-	 */
-	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
-	if (pl08x->vd->dualmaster) {
-		if (cctl & PL080_CONTROL_SRC_INCR)
-			/* Source increments, use AHB2 for destination */
-			cctl |= PL080_CONTROL_DST_AHB2;
-		else if (cctl & PL080_CONTROL_DST_INCR)
-			/* Destination increments, use AHB2 for source */
-			cctl |= PL080_CONTROL_SRC_AHB2;
-		else
-			/* Just pick something, source AHB1 dest AHB2 */
-			cctl |= PL080_CONTROL_DST_AHB2;
-	}
+	bd.txd = txd;
+	bd.pl08x = pl08x;
+	bd.srcbus.addr = txd->src_addr;
+	bd.dstbus.addr = txd->dst_addr;
 
 	/* Find maximum width of the source bus */
-	txd->srcbus.maxwidth =
+	bd.srcbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
 					 PL080_CONTROL_SWIDTH_SHIFT);
 
 	/* Find maximum width of the destination bus */
-	txd->dstbus.maxwidth =
+	bd.dstbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 					 PL080_CONTROL_DWIDTH_SHIFT);
 
 	/* Set up the bus widths to the maximum */
-	txd->srcbus.buswidth = txd->srcbus.maxwidth;
-	txd->dstbus.buswidth = txd->dstbus.maxwidth;
+	bd.srcbus.buswidth = bd.srcbus.maxwidth;
+	bd.dstbus.buswidth = bd.dstbus.maxwidth;
 	dev_vdbg(&pl08x->adev->dev,
 		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-		 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
 
 
 	/*
 	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
 	 */
-	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
 		PL080_CONTROL_TRANSFER_SIZE_MASK;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s max bytes per lli = %d\n",
+		 "%s max bytes per lli = %zu\n",
 		 __func__, max_bytes_per_lli);
 
 	/* We need to count this down to zero */
-	remainder = txd->len;
+	bd.remainder = txd->len;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s remainder = %d\n",
-		 __func__, remainder);
+		 "%s remainder = %zu\n",
+		 __func__, bd.remainder);
 
 	/*
 	 * Choose bus to align to
 	 * - prefers destination bus if both available
 	 * - if fixed address on one bus chooses other
-	 * - modifies cctl to choose an apropriate master
-	 */
-	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
-				&mbus, &sbus, cctl);
-
-
-	/*
-	 * The lowest bit of the LLI register
-	 * is also used to indicate which master to
-	 * use for reading the LLIs.
 	 */
+	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
 	if (txd->len < mbus->buswidth) {
-		/*
-		 * Less than a bus width available
-		 * - send as single bytes
-		 */
-		while (remainder) {
+		/* Less than a bus width available - send as single bytes */
+		while (bd.remainder) {
 			dev_vdbg(&pl08x->adev->dev,
 				 "%s single byte LLIs for a transfer of "
-				 "less than a bus width (remain %08x)\n",
-				 __func__, remainder);
+				 "less than a bus width (remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis =
-				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
-					cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	} else {
-		/*
-		 * Make one byte LLIs until master bus is aligned
-		 * - slave will then be aligned also
-		 */
+		/* Make one byte LLIs until master bus is aligned */
 		while ((mbus->addr) % (mbus->buswidth)) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s adjustment lli for less than bus width "
-				 "(remain %08x)\n",
-				 __func__, remainder);
+				 "(remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis = pl08x_fill_lli_for_desc
-				(pl08x, txd, num_llis, 1, cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 
 		/*
 		 * Master now aligned
 		 * - if slave is not then we must set its width down
 		 */
 		if (sbus->addr % sbus->buswidth) {
@@ -761,63 +675,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
-		while (remainder > (mbus->buswidth - 1)) {
-			int lli_len, target_len;
-			int tsize;
-			int odd_bytes;
+		while (bd.remainder > (mbus->buswidth - 1)) {
+			size_t lli_len, target_len, tsize, odd_bytes;
 
 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = remainder;
-			if (remainder > max_bytes_per_lli)
-				target_len = max_bytes_per_lli;
+			target_len = min(bd.remainder, max_bytes_per_lli);
 
 			/*
-			 * Set bus lengths for incrementing busses
-			 * to number of bytes which fill to next memory
-			 * boundary
+			 * Set bus lengths for incrementing buses to the
+			 * number of bytes which fill to next memory boundary,
+			 * limiting on the target length calculated above.
 			 */
 			if (cctl & PL080_CONTROL_SRC_INCR)
-				txd->srcbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->srcbus.addr,
-						remainder);
+				bd.srcbus.fill_bytes =
+					pl08x_pre_boundary(bd.srcbus.addr,
+						target_len);
 			else
-				txd->srcbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.srcbus.fill_bytes = target_len;
 
 			if (cctl & PL080_CONTROL_DST_INCR)
-				txd->dstbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->dstbus.addr,
-						remainder);
+				bd.dstbus.fill_bytes =
+					pl08x_pre_boundary(bd.dstbus.addr,
+						target_len);
 			else
-				txd->dstbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.dstbus.fill_bytes = target_len;
 
-			/*
-			 * Find the nearest
-			 */
-			lli_len = min(txd->srcbus.fill_bytes,
-				txd->dstbus.fill_bytes);
+			/* Find the nearest */
+			lli_len = min(bd.srcbus.fill_bytes,
+				bd.dstbus.fill_bytes);
 
-			BUG_ON(lli_len > remainder);
+			BUG_ON(lli_len > bd.remainder);
 
 			if (lli_len <= 0) {
 				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %d, <= 0\n",
+					"%s lli_len is %zu, <= 0\n",
 					__func__, lli_len);
 				return 0;
 			}
 
 			if (lli_len == target_len) {
 				/*
-				 * Can send what we wanted
-				 */
-				/*
-				 * Maintain alignment
+				 * Can send what we wanted.
+				 * Maintain alignment
 				 */
 				lli_len = (lli_len/mbus->buswidth) *
 					mbus->buswidth;
@@ -825,17 +727,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			} else {
 				/*
 				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary
-				 * The next lli will past the boundary
-				 * - however we may be working to a boundary
-				 * on the slave bus
-				 * We need to ensure the master stays aligned
+				 * to get to the nearest boundary. The next
+				 * LLI will past the boundary. However, we
+				 * may be working to a boundary on the slave
+				 * bus. We need to ensure the master stays
+				 * aligned, and that we are working in
+				 * multiples of the bus widths.
 				 */
 				odd_bytes = lli_len % mbus->buswidth;
-				/*
-				 * - and that we are working in multiples
-				 * of the bus widths
-				 */
 				lli_len -= odd_bytes;
 
 			}
@@ -855,41 +754,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 			if (target_len != lli_len) {
 				dev_vdbg(&pl08x->adev->dev,
-				"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+				"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 					__func__, target_len, lli_len, txd->len);
 			}
 
 			cctl = pl08x_cctl_bits(cctl,
-					       txd->srcbus.buswidth,
-					       txd->dstbus.buswidth,
+					       bd.srcbus.buswidth,
+					       bd.dstbus.buswidth,
 					       tsize);
 
 			dev_vdbg(&pl08x->adev->dev,
-				"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
-				__func__, lli_len, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
-					num_llis, lli_len, cctl,
-					&remainder);
+				"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+				__func__, lli_len, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++,
+					lli_len, cctl);
 			total_bytes += lli_len;
 		}
 
 
 		if (odd_bytes) {
 			/*
-			 * Creep past the boundary,
-			 * maintaining master alignment
+			 * Creep past the boundary, maintaining
+			 * master alignment
 			 */
 			int j;
 			for (j = 0; (j < mbus->buswidth)
-					&& (remainder); j++) {
+					&& (bd.remainder); j++) {
 				cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 				dev_vdbg(&pl08x->adev->dev,
-					"%s align with boundardy, single byte (remain %08x)\n",
-					__func__, remainder);
-				num_llis =
-					pl08x_fill_lli_for_desc(pl08x,
-							txd, num_llis, 1,
-							cctl, &remainder);
+					"%s align with boundary, single byte (remain 0x%08zx)\n",
+					__func__, bd.remainder);
+				pl08x_fill_lli_for_desc(&bd,
+					num_llis++, 1, cctl);
 				total_bytes++;
 			}
 		}
@@ -898,25 +794,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		/*
 		 * Send any odd bytes
 		 */
-		if (remainder < 0) {
-			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
-				__func__, remainder);
-			return 0;
-		}
-
-		while (remainder) {
+		while (bd.remainder) {
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundardy, single odd byte (remain %d)\n",
-				__func__, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
-					1, cctl, &remainder);
+				"%s align with boundary, single odd byte (remain %zu)\n",
+				__func__, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	}
 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
 			__func__, total_bytes, txd->len);
 		return 0;
 	}
@@ -927,41 +816,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			 __func__, (u32) MAX_NUM_TSFR_LLIS);
 		return 0;
 	}
-	/*
-	 * Decide whether this is a loop or a terminated transfer
-	 */
-	llis_va = txd->llis_va;
-	llis_bus = (struct lli *) txd->llis_bus;
 
-	if (cd->circular_buffer) {
-		/*
-		 * Loop the circular buffer so that the next element
-		 * points back to the beginning of the LLI.
-		 */
-		llis_va[num_llis - 1].next =
-			(dma_addr_t)((unsigned int)&(llis_bus[0]));
-	} else {
-		/*
-		 * On non-circular buffers, the final LLI terminates
-		 * the LLI.
-		 */
-		llis_va[num_llis - 1].next = 0;
-		/*
-		 * The final LLI element shall also fire an interrupt
-		 */
-		llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-	}
-
-	/* Now store the channel register values */
-	txd->csrc = llis_va[0].src;
-	txd->cdst = llis_va[0].dst;
-	if (num_llis > 1)
-		txd->clli = llis_va[0].next;
-	else
-		txd->clli = 0;
-
-	txd->cctl = llis_va[0].cctl;
-	/* ccfg will be set at physical channel allocation time */
+	llis_va = txd->llis_va;
+	/* The final LLI terminates the LLI. */
+	llis_va[num_llis - 1].lli = 0;
+	/* The final LLI element shall also fire an interrupt. */
+	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
 
 #ifdef VERBOSE_DEBUG
 	{
@@ -969,13 +829,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 		for (i = 0; i < num_llis; i++) {
 			dev_vdbg(&pl08x->adev->dev,
-				 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
 				 i,
 				 &llis_va[i],
 				 llis_va[i].src,
 				 llis_va[i].dst,
 				 llis_va[i].cctl,
-				 llis_va[i].next
+				 llis_va[i].lli
 				 );
 		}
 	}
@@ -988,14 +848,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
-	if (!txd)
-		dev_err(&pl08x->adev->dev,
-			"%s no descriptor to free\n",
-			__func__);
-
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va,
-		      txd->llis_bus);
+	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
@@ -1008,13 +862,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *next;
 
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
-					 next, &plchan->desc_list, node) {
+					 next, &plchan->pend_list, node) {
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
-
 	}
 }
 
@@ -1069,6 +922,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			return -EBUSY;
 		}
 		ch->signal = ret;
+
+		/* Assign the flow control signal to this channel */
+		if (txd->direction == DMA_TO_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		else if (txd->direction == DMA_FROM_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +935,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			ch->signal,
 			plchan->name);
 
+	plchan->phychan_hold++;
 	plchan->phychan = ch;
 
 	return 0;
 }
 
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+		pl08x->pd->put_signal(plchan);
+		plchan->phychan->signal = -1;
+	}
+	pl08x_put_phy_channel(pl08x, plchan->phychan);
+	plchan->phychan = NULL;
+}
+
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+	struct pl08x_txd *txd = to_pl08x_txd(tx);
+	unsigned long flags;
 
-	atomic_inc(&plchan->last_issued);
-	tx->cookie = atomic_read(&plchan->last_issued);
-	/* This unlock follows the lock in the prep() function */
-	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+	spin_lock_irqsave(&plchan->lock, flags);
+
+	plchan->chan.cookie += 1;
+	if (plchan->chan.cookie < 0)
+		plchan->chan.cookie = 1;
+	tx->cookie = plchan->chan.cookie;
+
+	/* Put this onto the pending list */
+	list_add_tail(&txd->node, &plchan->pend_list);
+
+	/*
+	 * If there was no physical channel available for this memcpy,
+	 * stack the request up and indicate that the channel is waiting
+	 * for a free physical channel.
+	 */
+	if (!plchan->slave && !plchan->phychan) {
+		/* Do this memcpy whenever there is a channel ready */
+		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting = txd;
+	} else {
+		plchan->phychan_hold--;
+	}
+
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return tx->cookie;
 }
@@ -1102,10 +996,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
 }
 
 /*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may give problems.
+ * If slaves are relying on interrupts to signal completion this function
+ * must not be called with interrupts disabled.
 */
 static enum dma_status
 pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1011,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 	u32 bytesleft = 0;
 
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1021,9 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 	}
 
 	/*
-	 * schedule(); could be inserted here
-	 */
-
-	/*
 	 * This cookie not complete yet
 	 */
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1088,35 @@ static const struct burst_table burst_sizes[] = {
 	},
 };
 
-static void dma_set_runtime_config(struct dma_chan *chan,
+static int dma_set_runtime_config(struct dma_chan *chan,
 				   struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_channel_data *cd = plchan->cd;
 	enum dma_slave_buswidth addr_width;
+	dma_addr_t addr;
 	u32 maxburst;
 	u32 cctl = 0;
-	/* Mask out all except src and dst channel */
-	u32 ccfg = cd->ccfg & 0x000003DEU;
-	int i = 0;
+	int i;
+
+	if (!plchan->slave)
+		return -EINVAL;
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
 	if (config->direction == DMA_TO_DEVICE) {
-		plchan->runtime_addr = config->dst_addr;
-		cctl |= PL080_CONTROL_SRC_INCR;
-		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
 	} else if (config->direction == DMA_FROM_DEVICE) {
-		plchan->runtime_addr = config->src_addr;
-		cctl |= PL080_CONTROL_DST_INCR;
-		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien transfer direction\n");
-		return;
+		return -EINVAL;
 	}
 
 	switch (addr_width) {
@@ -1248,42 +1135,40 @@ static void dma_set_runtime_config(struct dma_chan *chan,
 	default:
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	/*
 	 * Now decide on a maxburst:
-	 * If this channel will only request single transfers, set
-	 * this down to ONE element.
+	 * If this channel will only request single transfers, set this
+	 * down to ONE element. Also select one element if no maxburst
+	 * is specified.
 	 */
-	if (plchan->cd->single) {
+	if (plchan->cd->single || maxburst == 0) {
 		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
 			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
 	} else {
-		while (i < ARRAY_SIZE(burst_sizes)) {
+		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
 			if (burst_sizes[i].burstwords <= maxburst)
 				break;
-			i++;
-		}
 		cctl |= burst_sizes[i].reg;
 	}
 
-	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
-	cctl &= ~PL080_CONTROL_PROT_MASK;
-	cctl |= PL080_CONTROL_PROT_SYS;
+	plchan->runtime_addr = addr;
 
 	/* Modify the default channel data to fit PrimeCell request */
 	cd->cctl = cctl;
-	cd->ccfg = ccfg;
 
 	dev_dbg(&pl08x->adev->dev,
 		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+		"maxburst %d words, LE, CCTL=0x%08x\n",
 		dma_chan_name(chan), plchan->name,
 		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
 		addr_width,
 		maxburst,
-		cctl, ccfg);
+		cctl);
+
+	return 0;
 }
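Note: for context, dma_set_runtime_config() is reached when a DMA engine client configures the channel. A sketch of such a caller, using the generic device_control hook through which slave configuration is delivered (the FIFO address is invented for the example):

	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= 0x10000080,	/* peripheral FIFO, made up */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
				     (unsigned long)&cfg);

With maxburst == 8 the burst_sizes[] scan picks the largest entry whose burstwords does not exceed 8; with maxburst == 0 the reworked code now selects single-element bursts up front instead of running the scan with nothing to match.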
 
 /*
@@ -1293,35 +1178,26 @@ static void dma_set_runtime_config(struct dma_chan *chan,
 static void pl08x_issue_pending(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-	/* Something is already active */
-	if (plchan->at) {
+	/* Something is already active, or we're waiting for a channel... */
+	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
 		spin_unlock_irqrestore(&plchan->lock, flags);
-		return;
-	}
-
-	/* Didn't get a physical channel so waiting for it ... */
-	if (plchan->state == PL08X_CHAN_WAITING)
 		return;
+	}
 
 	/* Take the first element in the queue and execute it */
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *next;
 
-		next = list_first_entry(&plchan->desc_list,
+		next = list_first_entry(&plchan->pend_list,
 					struct pl08x_txd,
 					node);
 		list_del(&next->node);
-		plchan->at = next;
 		plchan->state = PL08X_CHAN_RUNNING;
 
-		/* Configure the physical channel for the active txd */
-		pl08x_config_phychan_for_txd(plchan);
-		pl08x_set_cregs(pl08x, plchan->phychan);
-		pl08x_enable_phy_chan(pl08x, plchan->phychan);
+		pl08x_start_txd(plchan, next);
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1206,17 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 					struct pl08x_txd *txd)
 {
-	int num_llis;
 	struct pl08x_driver_data *pl08x = plchan->host;
-	int ret;
+	unsigned long flags;
+	int num_llis, ret;
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-
-	if (!num_llis)
+	if (!num_llis) {
+		kfree(txd);
 		return -EINVAL;
+	}
 
-	spin_lock_irqsave(&plchan->lock, plchan->lockflags);
-
-	/*
-	 * If this device is not using a circular buffer then
-	 * queue this new descriptor for transfer.
-	 * The descriptor for a circular buffer continues
-	 * to be used until the channel is freed.
-	 */
-	if (txd->cd->circular_buffer)
-		dev_err(&pl08x->adev->dev,
-			"%s attempting to queue a circular buffer\n",
-			__func__);
-	else
-		list_add_tail(&txd->node,
-			      &plchan->desc_list);
+	spin_lock_irqsave(&plchan->lock, flags);
 
 	/*
 	 * See if we already have a physical channel allocated,
@@ -1362,45 +1225,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1362 ret = prep_phy_channel(plchan, txd); 1225 ret = prep_phy_channel(plchan, txd);
1363 if (ret) { 1226 if (ret) {
1364 /* 1227 /*
1365 * No physical channel available, we will 1228 * No physical channel was available.
1366 * stack up the memcpy channels until there is a channel 1229 *
1367 * available to handle it whereas slave transfers may 1230 * memcpy transfers can be sorted out at submission time.
1368 * have been denied due to platform channel muxing restrictions 1231 *
1369 * and since there is no guarantee that this will ever be 1232 * Slave transfers may have been denied due to platform
1370 * resolved, and since the signal must be aquired AFTER 1233 * channel muxing restrictions. Since there is no guarantee
1371 * aquiring the physical channel, we will let them be NACK:ed 1234 * that this will ever be resolved, and the signal must be
1372 * with -EBUSY here. The drivers can alway retry the prep() 1235 * acquired AFTER acquiring the physical channel, we will let
1373 * call if they are eager on doing this using DMA. 1236 * them be NACK:ed with -EBUSY here. The drivers can retry
1237 * the prep() call if they are eager on doing this using DMA.
1374 */ 1238 */
1375 if (plchan->slave) { 1239 if (plchan->slave) {
1376 pl08x_free_txd_list(pl08x, plchan); 1240 pl08x_free_txd_list(pl08x, plchan);
1377 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); 1241 pl08x_free_txd(pl08x, txd);
1242 spin_unlock_irqrestore(&plchan->lock, flags);
1378 return -EBUSY; 1243 return -EBUSY;
1379 } 1244 }
1380 /* Do this memcpy whenever there is a channel ready */
1381 plchan->state = PL08X_CHAN_WAITING;
1382 plchan->waiting = txd;
1383 } else 1245 } else
1384 /* 1246 /*
1385 * Else we're all set, paused and ready to roll, 1247 * Else we're all set, paused and ready to roll, status
1386 * status will switch to PL08X_CHAN_RUNNING when 1248 * will switch to PL08X_CHAN_RUNNING when we call
1387 * we call issue_pending(). If there is something 1249 * issue_pending(). If there is something running on the
1388 * running on the channel already we don't change 1250 * channel already we don't change its state.
1389 * its state.
1390 */ 1251 */
1391 if (plchan->state == PL08X_CHAN_IDLE) 1252 if (plchan->state == PL08X_CHAN_IDLE)
1392 plchan->state = PL08X_CHAN_PAUSED; 1253 plchan->state = PL08X_CHAN_PAUSED;
1393 1254
1394 /* 1255 spin_unlock_irqrestore(&plchan->lock, flags);
1395 * Notice that we leave plchan->lock locked on purpose:
1396 * it will be unlocked in the subsequent tx_submit()
1397 * call. This is a consequence of the current API.
1398 */
1399 1256
1400 return 0; 1257 return 0;
1401} 1258}
1402 1259
1403/* 1260/*
1261 * Given the source and destination available bus masks, select which
1262 * will be routed to each port. We try to have source and destination
1263 * on separate ports, but always respect the allowable settings.
1264 */
1265static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
1266{
1267 u32 cctl = 0;
1268
1269 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1270 cctl |= PL080_CONTROL_DST_AHB2;
1271 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1272 cctl |= PL080_CONTROL_SRC_AHB2;
1273
1274 return cctl;
1275}
1276
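Working the two conditions in pl08x_select_bus() through for the interesting capability-mask combinations gives the following routing; a cleared PL080_CONTROL_*_AHB2 bit means the port stays on AHB1.

/*
 * Sketch: outcomes of the two tests above for representative masks.
 *
 *   src capable of    dst capable of    resulting routing
 *   ------------------------------------------------------
 *   AHB1|AHB2         AHB1|AHB2         src->AHB1, dst->AHB2 (split)
 *   AHB1              AHB2              src->AHB1, dst->AHB2
 *   AHB2              AHB1              src->AHB2, dst->AHB1
 *   AHB2              AHB2              src->AHB2, dst->AHB2 (forced)
 */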
1277static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1278 unsigned long flags)
1279{
1280 struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1281
1282 if (txd) {
1283 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1284 txd->tx.flags = flags;
1285 txd->tx.tx_submit = pl08x_tx_submit;
1286 INIT_LIST_HEAD(&txd->node);
1287
1288 /* Always enable error and terminal interrupts */
1289 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1290 PL080_CONFIG_TC_IRQ_MASK;
1291 }
1292 return txd;
1293}
1294
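So every descriptor leaves pl08x_get_txd() with the two IRQ mask bits set, and each prep routine only has to OR in its flow-control setting; the composed ccfg per transfer type, using the bitfield names from this patch, is:

/*
 * Sketch of the composed txd->ccfg:
 *
 *   base (pl08x_get_txd):
 *	PL080_CONFIG_ERR_IRQ_MASK | PL080_CONFIG_TC_IRQ_MASK
 *   memcpy:
 *	base | (PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT)
 *   DMA_TO_DEVICE:
 *	base | (PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT)
 *   DMA_FROM_DEVICE:
 *	base | (PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT)
 */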
1295/*
1404 * Initialize a descriptor to be used by memcpy submit 1296 * Initialize a descriptor to be used by memcpy submit
1405 */ 1297 */
1406static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1298static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1304,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1412 struct pl08x_txd *txd; 1304 struct pl08x_txd *txd;
1413 int ret; 1305 int ret;
1414 1306
1415 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1307 txd = pl08x_get_txd(plchan, flags);
1416 if (!txd) { 1308 if (!txd) {
1417 dev_err(&pl08x->adev->dev, 1309 dev_err(&pl08x->adev->dev,
1418 "%s no memory for descriptor\n", __func__); 1310 "%s no memory for descriptor\n", __func__);
1419 return NULL; 1311 return NULL;
1420 } 1312 }
1421 1313
1422 dma_async_tx_descriptor_init(&txd->tx, chan);
1423 txd->direction = DMA_NONE; 1314 txd->direction = DMA_NONE;
1424 txd->srcbus.addr = src; 1315 txd->src_addr = src;
1425 txd->dstbus.addr = dest; 1316 txd->dst_addr = dest;
1317 txd->len = len;
1426 1318
1427 /* Set platform data for m2m */ 1319 /* Set platform data for m2m */
1428 txd->cd = &pl08x->pd->memcpy_channel; 1320 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1321 txd->cctl = pl08x->pd->memcpy_channel.cctl &
1322 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1323
1429 /* Both to be incremented or the code will break */ 1324 /* Both to be incremented or the code will break */
1430 txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1325 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1431 txd->tx.tx_submit = pl08x_tx_submit; 1326
1432 txd->tx.callback = NULL; 1327 if (pl08x->vd->dualmaster)
1433 txd->tx.callback_param = NULL; 1328 txd->cctl |= pl08x_select_bus(pl08x,
1434 txd->len = len; 1329 pl08x->mem_buses, pl08x->mem_buses);
1435 1330
1436 INIT_LIST_HEAD(&txd->node);
1437 ret = pl08x_prep_channel_resources(plchan, txd); 1331 ret = pl08x_prep_channel_resources(plchan, txd);
1438 if (ret) 1332 if (ret)
1439 return NULL; 1333 return NULL;
1440 /*
1441 * NB: the channel lock is held at this point so tx_submit()
1442 * must be called in direct succession.
1443 */
1444 1334
1445 return &txd->tx; 1335 return &txd->tx;
1446} 1336}
1447 1337
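A minimal sketch of a client driving this memcpy path through the stock dmaengine calls; the helper name is hypothetical, and releasing the channel after completion is elided.

#include <linux/dmaengine.h>

static int my_async_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx->tx_submit(tx);
	dma_async_issue_pending(chan);
	/* keep chan until the completion callback; release elided */
	return 0;
}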
1448struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1338static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1449 struct dma_chan *chan, struct scatterlist *sgl, 1339 struct dma_chan *chan, struct scatterlist *sgl,
1450 unsigned int sg_len, enum dma_data_direction direction, 1340 unsigned int sg_len, enum dma_data_direction direction,
1451 unsigned long flags) 1341 unsigned long flags)
@@ -1453,6 +1343,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1453 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1343 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1454 struct pl08x_driver_data *pl08x = plchan->host; 1344 struct pl08x_driver_data *pl08x = plchan->host;
1455 struct pl08x_txd *txd; 1345 struct pl08x_txd *txd;
1346 u8 src_buses, dst_buses;
1456 int ret; 1347 int ret;
1457 1348
1458 /* 1349 /*
@@ -1467,14 +1358,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1467 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1358 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1468 __func__, sgl->length, plchan->name); 1359 __func__, sgl->length, plchan->name);
1469 1360
1470 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1361 txd = pl08x_get_txd(plchan, flags);
1471 if (!txd) { 1362 if (!txd) {
1472 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1363 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1473 return NULL; 1364 return NULL;
1474 } 1365 }
1475 1366
1476 dma_async_tx_descriptor_init(&txd->tx, chan);
1477
1478 if (direction != plchan->runtime_direction) 1367 if (direction != plchan->runtime_direction)
1479 dev_err(&pl08x->adev->dev, "%s DMA setup does not match " 1368 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1480 "the direction configured for the PrimeCell\n", 1369 "the direction configured for the PrimeCell\n",
@@ -1486,37 +1375,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1486 * channel target address dynamically at runtime. 1375 * channel target address dynamically at runtime.
1487 */ 1376 */
1488 txd->direction = direction; 1377 txd->direction = direction;
1378 txd->len = sgl->length;
1379
1380 txd->cctl = plchan->cd->cctl &
1381 ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1382 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1383 PL080_CONTROL_PROT_MASK);
1384
1385 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1386 txd->cctl |= PL080_CONTROL_PROT_SYS;
1387
1489 if (direction == DMA_TO_DEVICE) { 1388 if (direction == DMA_TO_DEVICE) {
1490 txd->srcbus.addr = sgl->dma_address; 1389 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1390 txd->cctl |= PL080_CONTROL_SRC_INCR;
1391 txd->src_addr = sgl->dma_address;
1491 if (plchan->runtime_addr) 1392 if (plchan->runtime_addr)
1492 txd->dstbus.addr = plchan->runtime_addr; 1393 txd->dst_addr = plchan->runtime_addr;
1493 else 1394 else
1494 txd->dstbus.addr = plchan->cd->addr; 1395 txd->dst_addr = plchan->cd->addr;
1396 src_buses = pl08x->mem_buses;
1397 dst_buses = plchan->cd->periph_buses;
1495 } else if (direction == DMA_FROM_DEVICE) { 1398 } else if (direction == DMA_FROM_DEVICE) {
1399 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1400 txd->cctl |= PL080_CONTROL_DST_INCR;
1496 if (plchan->runtime_addr) 1401 if (plchan->runtime_addr)
1497 txd->srcbus.addr = plchan->runtime_addr; 1402 txd->src_addr = plchan->runtime_addr;
1498 else 1403 else
1499 txd->srcbus.addr = plchan->cd->addr; 1404 txd->src_addr = plchan->cd->addr;
1500 txd->dstbus.addr = sgl->dma_address; 1405 txd->dst_addr = sgl->dma_address;
1406 src_buses = plchan->cd->periph_buses;
1407 dst_buses = pl08x->mem_buses;
1501 } else { 1408 } else {
1502 dev_err(&pl08x->adev->dev, 1409 dev_err(&pl08x->adev->dev,
1503 "%s direction unsupported\n", __func__); 1410 "%s direction unsupported\n", __func__);
1504 return NULL; 1411 return NULL;
1505 } 1412 }
1506 txd->cd = plchan->cd; 1413
1507 txd->tx.tx_submit = pl08x_tx_submit; 1414 txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
1508 txd->tx.callback = NULL;
1509 txd->tx.callback_param = NULL;
1510 txd->len = sgl->length;
1511 INIT_LIST_HEAD(&txd->node);
1512 1415
1513 ret = pl08x_prep_channel_resources(plchan, txd); 1416 ret = pl08x_prep_channel_resources(plchan, txd);
1514 if (ret) 1417 if (ret)
1515 return NULL; 1418 return NULL;
1516 /*
1517 * NB: the channel lock is held at this point so tx_submit()
1518 * must be called in direct succession.
1519 */
1520 1419
1521 return &txd->tx; 1420 return &txd->tx;
1522} 1421}
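And the slave counterpart, assuming a hypothetical peripheral with a 32-bit FIFO: the target address and width arrive through DMA_SLAVE_CONFIG (handled by pl08x_control() below) before the scatterlist, already DMA-mapped, is prepped.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *my_prep_tx(struct dma_chan *chan,
						  struct scatterlist *sgl,
						  unsigned int sg_len,
						  dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_addr,	/* hypothetical FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
				     (unsigned long)&cfg);

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
}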
@@ -1531,10 +1430,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1531 1430
1532 /* Controls applicable to inactive channels */ 1431 /* Controls applicable to inactive channels */
1533 if (cmd == DMA_SLAVE_CONFIG) { 1432 if (cmd == DMA_SLAVE_CONFIG) {
1534 dma_set_runtime_config(chan, 1433 return dma_set_runtime_config(chan,
1535 (struct dma_slave_config *) 1434 (struct dma_slave_config *)arg);
1536 arg);
1537 return 0;
1538 } 1435 }
1539 1436
1540 /* 1437 /*
@@ -1558,16 +1455,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1558 * Mark physical channel as free and free any slave 1455 * Mark physical channel as free and free any slave
1559 * signal 1456 * signal
1560 */ 1457 */
1561 if ((plchan->phychan->signal >= 0) && 1458 release_phy_channel(plchan);
1562 pl08x->pd->put_signal) {
1563 pl08x->pd->put_signal(plchan);
1564 plchan->phychan->signal = -1;
1565 }
1566 pl08x_put_phy_channel(pl08x, plchan->phychan);
1567 plchan->phychan = NULL;
1568 } 1459 }
1569 /* Stop any pending tasklet */
1570 tasklet_disable(&plchan->tasklet);
1571 /* Dequeue jobs and free LLIs */ 1460 /* Dequeue jobs and free LLIs */
1572 if (plchan->at) { 1461 if (plchan->at) {
1573 pl08x_free_txd(pl08x, plchan->at); 1462 pl08x_free_txd(pl08x, plchan->at);
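For completeness, a sketch of the teardown a client issues against this path: DMA_TERMINATE_ALL lands in the branch above, releasing the physical channel and signal and discarding plchan->at plus the pending list. The wrapper name is hypothetical.

static void my_dma_teardown(struct dma_chan *chan)
{
	/* Stops the hardware, frees the signal and queued txds (above) */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	dma_release_channel(chan);
}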
@@ -1609,10 +1498,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1609 1498
1610/* 1499/*
1611 * Just check that the device is there and active 1500 * Just check that the device is there and active
1612 * TODO: turn this bit on/off depending on the number of 1501 * TODO: turn this bit on/off depending on the number of physical channels
1613 * physical channels actually used, if it is zero... well 1502 * actually used, if it is zero... well shut it off. That will save some
1614 * shut it off. That will save some power. Cut the clock 1503 * power. Cut the clock at the same time.
1615 * at the same time.
1616 */ 1504 */
1617static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1505static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1618{ 1506{
@@ -1620,78 +1508,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1620 1508
1621 val = readl(pl08x->base + PL080_CONFIG); 1509 val = readl(pl08x->base + PL080_CONFIG);
1622 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1510 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1623 /* We implictly clear bit 1 and that means little-endian mode */ 1511 /* We implicitly clear bit 1 and that means little-endian mode */
1624 val |= PL080_CONFIG_ENABLE; 1512 val |= PL080_CONFIG_ENABLE;
1625 writel(val, pl08x->base + PL080_CONFIG); 1513 writel(val, pl08x->base + PL080_CONFIG);
1626} 1514}
1627 1515
1516static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1517{
1518 struct device *dev = txd->tx.chan->device->dev;
1519
1520 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1521 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1522 dma_unmap_single(dev, txd->src_addr, txd->len,
1523 DMA_TO_DEVICE);
1524 else
1525 dma_unmap_page(dev, txd->src_addr, txd->len,
1526 DMA_TO_DEVICE);
1527 }
1528 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1529 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1530 dma_unmap_single(dev, txd->dst_addr, txd->len,
1531 DMA_FROM_DEVICE);
1532 else
1533 dma_unmap_page(dev, txd->dst_addr, txd->len,
1534 DMA_FROM_DEVICE);
1535 }
1536}
1537
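pl08x_unmap_buffers() honours the standard completion flags, so a client that manages its own DMA mappings opts out at prep time; a sketch, with a hypothetical helper name:

static struct dma_async_tx_descriptor *my_prep_premapped(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len)
{
	/* We did the dma_map_*() calls ourselves: skip the automatic unmap */
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;

	return chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						    flags);
}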
1628static void pl08x_tasklet(unsigned long data) 1538static void pl08x_tasklet(unsigned long data)
1629{ 1539{
1630 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; 1540 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1631 struct pl08x_phy_chan *phychan = plchan->phychan;
1632 struct pl08x_driver_data *pl08x = plchan->host; 1541 struct pl08x_driver_data *pl08x = plchan->host;
1542 struct pl08x_txd *txd;
1543 unsigned long flags;
1633 1544
1634 if (!plchan) 1545 spin_lock_irqsave(&plchan->lock, flags);
1635 BUG();
1636
1637 spin_lock(&plchan->lock);
1638
1639 if (plchan->at) {
1640 dma_async_tx_callback callback =
1641 plchan->at->tx.callback;
1642 void *callback_param =
1643 plchan->at->tx.callback_param;
1644
1645 /*
1646 * Update last completed
1647 */
1648 plchan->lc =
1649 (plchan->at->tx.cookie);
1650
1651 /*
1652 * Callback to signal completion
1653 */
1654 if (callback)
1655 callback(callback_param);
1656 1546
1657 /* 1547 txd = plchan->at;
1658 * Device callbacks should NOT clear 1548 plchan->at = NULL;
1659 * the current transaction on the channel
1660 * Linus: sometimes they should?
1661 */
1662 if (!plchan->at)
1663 BUG();
1664 1549
1665 /* 1550 if (txd) {
1666 * Free the descriptor if it's not for a device 1551 /* Update last completed */
1667 * using a circular buffer 1552 plchan->lc = txd->tx.cookie;
1668 */
1669 if (!plchan->at->cd->circular_buffer) {
1670 pl08x_free_txd(pl08x, plchan->at);
1671 plchan->at = NULL;
1672 }
1673 /*
1674 * else descriptor for circular
1675 * buffers only freed when
1676 * client has disabled dma
1677 */
1678 } 1553 }
1679 /* 1554
1680 * If a new descriptor is queued, set it up 1555 /* If a new descriptor is queued, set it up; plchan->at is NULL here */
1681 * plchan->at is NULL here 1556 if (!list_empty(&plchan->pend_list)) {
1682 */
1683 if (!list_empty(&plchan->desc_list)) {
1684 struct pl08x_txd *next; 1557 struct pl08x_txd *next;
1685 1558
1686 next = list_first_entry(&plchan->desc_list, 1559 next = list_first_entry(&plchan->pend_list,
1687 struct pl08x_txd, 1560 struct pl08x_txd,
1688 node); 1561 node);
1689 list_del(&next->node); 1562 list_del(&next->node);
1690 plchan->at = next; 1563
1691 /* Configure the physical channel for the next txd */ 1564 pl08x_start_txd(plchan, next);
1692 pl08x_config_phychan_for_txd(plchan); 1565 } else if (plchan->phychan_hold) {
1693 pl08x_set_cregs(pl08x, plchan->phychan); 1566 /*
1694 pl08x_enable_phy_chan(pl08x, plchan->phychan); 1567 * This channel is still in use - we have a new txd being
1568 * prepared and will soon be queued. Don't give up the
1569 * physical channel.
1570 */
1695 } else { 1571 } else {
1696 struct pl08x_dma_chan *waiting = NULL; 1572 struct pl08x_dma_chan *waiting = NULL;
1697 1573
@@ -1699,20 +1575,14 @@ static void pl08x_tasklet(unsigned long data)
1699 * No more jobs, so free up the physical channel 1575 * No more jobs, so free up the physical channel
1700 * Free any allocated signal on slave transfers too 1576 * Free any allocated signal on slave transfers too
1701 */ 1577 */
1702 if ((phychan->signal >= 0) && pl08x->pd->put_signal) { 1578 release_phy_channel(plchan);
1703 pl08x->pd->put_signal(plchan);
1704 phychan->signal = -1;
1705 }
1706 pl08x_put_phy_channel(pl08x, phychan);
1707 plchan->phychan = NULL;
1708 plchan->state = PL08X_CHAN_IDLE; 1579 plchan->state = PL08X_CHAN_IDLE;
1709 1580
1710 /* 1581 /*
1711 * And NOW before anyone else can grab that freed 1582 * And NOW before anyone else can grab that freed up
1712 * up physical channel, see if there is some memcpy 1583 * physical channel, see if there is some memcpy pending
1713 * pending that seriously needs to start because of 1584 * that seriously needs to start because of being stacked
1714 * being stacked up while we were choking the 1585 * up while we were choking the physical channels with data.
1715 * physical channels with data.
1716 */ 1586 */
1717 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1587 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1718 chan.device_node) { 1588 chan.device_node) {
@@ -1724,6 +1594,7 @@ static void pl08x_tasklet(unsigned long data)
1724 ret = prep_phy_channel(waiting, 1594 ret = prep_phy_channel(waiting,
1725 waiting->waiting); 1595 waiting->waiting);
1726 BUG_ON(ret); 1596 BUG_ON(ret);
1597 waiting->phychan_hold--;
1727 waiting->state = PL08X_CHAN_RUNNING; 1598 waiting->state = PL08X_CHAN_RUNNING;
1728 waiting->waiting = NULL; 1599 waiting->waiting = NULL;
1729 pl08x_issue_pending(&waiting->chan); 1600 pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1603,25 @@ static void pl08x_tasklet(unsigned long data)
1732 } 1603 }
1733 } 1604 }
1734 1605
1735 spin_unlock(&plchan->lock); 1606 spin_unlock_irqrestore(&plchan->lock, flags);
1607
1608 if (txd) {
1609 dma_async_tx_callback callback = txd->tx.callback;
1610 void *callback_param = txd->tx.callback_param;
1611
1612 /* Don't try to unmap buffers on slave channels */
1613 if (!plchan->slave)
1614 pl08x_unmap_buffers(txd);
1615
1616 /* Free the descriptor */
1617 spin_lock_irqsave(&plchan->lock, flags);
1618 pl08x_free_txd(pl08x, txd);
1619 spin_unlock_irqrestore(&plchan->lock, flags);
1620
1621 /* Callback to signal completion */
1622 if (callback)
1623 callback(callback_param);
1624 }
1736} 1625}
1737 1626
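Summarising the reworked tasklet's ordering, which is the point of the change: completion state is updated under the channel lock, but the descriptor's callback runs with no locks held, so the client may prep and submit new work from within it.

/*
 * Sketch of the flow above:
 *
 *   lock(plchan->lock)
 *     txd = plchan->at, plchan->at = NULL, plchan->lc = txd->tx.cookie
 *     pend_list non-empty -> start the next txd on the phychan
 *     else phychan_hold   -> keep the phychan, a prep is in flight
 *     else                -> release phychan, go IDLE, and hand the
 *                            channel to any WAITING memcpy channel
 *   unlock(plchan->lock)
 *   unmap buffers (memcpy only), free the txd under the lock,
 *   then invoke the txd's callback with no locks held.
 */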
1738static irqreturn_t pl08x_irq(int irq, void *dev) 1627static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1633,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1744 1633
1745 val = readl(pl08x->base + PL080_ERR_STATUS); 1634 val = readl(pl08x->base + PL080_ERR_STATUS);
1746 if (val) { 1635 if (val) {
1747 /* 1636 /* An error interrupt (on one or more channels) */
1748 * An error interrupt (on one or more channels)
1749 */
1750 dev_err(&pl08x->adev->dev, 1637 dev_err(&pl08x->adev->dev,
1751 "%s error interrupt, register value 0x%08x\n", 1638 "%s error interrupt, register value 0x%08x\n",
1752 __func__, val); 1639 __func__, val);
@@ -1770,9 +1657,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1770 mask |= (1 << i); 1657 mask |= (1 << i);
1771 } 1658 }
1772 } 1659 }
1773 /* 1660 /* Clear only the terminal interrupts on channels we processed */
1774 * Clear only the terminal interrupts on channels we processed
1775 */
1776 writel(mask, pl08x->base + PL080_TC_CLEAR); 1661 writel(mask, pl08x->base + PL080_TC_CLEAR);
1777 1662
1778 return mask ? IRQ_HANDLED : IRQ_NONE; 1663 return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1676,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1791 int i; 1676 int i;
1792 1677
1793 INIT_LIST_HEAD(&dmadev->channels); 1678 INIT_LIST_HEAD(&dmadev->channels);
1679
1794 /* 1680 /*
1795 * Register as many memcpy channels as we have physical channels, 1681 * Register as many memcpy channels as we have physical channels,
1796 * we won't always be able to use all but the code will have 1682 * we won't always be able to use all but the code will have
@@ -1819,16 +1705,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1819 return -ENOMEM; 1705 return -ENOMEM;
1820 } 1706 }
1821 } 1707 }
1708 if (chan->cd->circular_buffer) {
1709 dev_err(&pl08x->adev->dev,
1710 "channel %s: circular buffers not supported\n",
1711 chan->name);
1712 kfree(chan);
1713 continue;
1714 }
1822 dev_info(&pl08x->adev->dev, 1715 dev_info(&pl08x->adev->dev,
1823 "initialize virtual channel \"%s\"\n", 1716 "initialize virtual channel \"%s\"\n",
1824 chan->name); 1717 chan->name);
1825 1718
1826 chan->chan.device = dmadev; 1719 chan->chan.device = dmadev;
1827 atomic_set(&chan->last_issued, 0); 1720 chan->chan.cookie = 0;
1828 chan->lc = atomic_read(&chan->last_issued); 1721 chan->lc = 0;
1829 1722
1830 spin_lock_init(&chan->lock); 1723 spin_lock_init(&chan->lock);
1831 INIT_LIST_HEAD(&chan->desc_list); 1724 INIT_LIST_HEAD(&chan->pend_list);
1832 tasklet_init(&chan->tasklet, pl08x_tasklet, 1725 tasklet_init(&chan->tasklet, pl08x_tasklet,
1833 (unsigned long) chan); 1726 (unsigned long) chan);
1834 1727
@@ -1898,7 +1791,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1898 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1791 seq_printf(s, "CHANNEL:\tSTATE:\n");
1899 seq_printf(s, "--------\t------\n"); 1792 seq_printf(s, "--------\t------\n");
1900 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1793 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1901 seq_printf(s, "%s\t\t\%s\n", chan->name, 1794 seq_printf(s, "%s\t\t%s\n", chan->name,
1902 pl08x_state_str(chan->state)); 1795 pl08x_state_str(chan->state));
1903 } 1796 }
1904 1797
@@ -1906,7 +1799,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1906 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1799 seq_printf(s, "CHANNEL:\tSTATE:\n");
1907 seq_printf(s, "--------\t------\n"); 1800 seq_printf(s, "--------\t------\n");
1908 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1801 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1909 seq_printf(s, "%s\t\t\%s\n", chan->name, 1802 seq_printf(s, "%s\t\t%s\n", chan->name,
1910 pl08x_state_str(chan->state)); 1803 pl08x_state_str(chan->state));
1911 } 1804 }
1912 1805
@@ -1942,7 +1835,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1942static int pl08x_probe(struct amba_device *adev, struct amba_id *id) 1835static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1943{ 1836{
1944 struct pl08x_driver_data *pl08x; 1837 struct pl08x_driver_data *pl08x;
1945 struct vendor_data *vd = id->data; 1838 const struct vendor_data *vd = id->data;
1946 int ret = 0; 1839 int ret = 0;
1947 int i; 1840 int i;
1948 1841
@@ -1990,6 +1883,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1990 pl08x->adev = adev; 1883 pl08x->adev = adev;
1991 pl08x->vd = vd; 1884 pl08x->vd = vd;
1992 1885
1886 /* By default, AHB1 only. If dualmaster, from platform */
1887 pl08x->lli_buses = PL08X_AHB1;
1888 pl08x->mem_buses = PL08X_AHB1;
1889 if (pl08x->vd->dualmaster) {
1890 pl08x->lli_buses = pl08x->pd->lli_buses;
1891 pl08x->mem_buses = pl08x->pd->mem_buses;
1892 }
1893
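On a dual-master system the platform decides which AHB master(s) can reach LLI memory and transfer memory; a sketch of the corresponding platform data, with illustrative values only and the channel tables and signal hooks elided:

static struct pl08x_platform_data pl08x_pd = {
	/* slave channel table, memcpy channel, signal hooks elided */
	.lli_buses	= PL08X_AHB1,
	.mem_buses	= PL08X_AHB1 | PL08X_AHB2,
};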
1993 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 1894 /* A DMA memory pool for LLIs, align on 1-byte boundary */
1994 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 1895 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1995 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 1896 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1910,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
2009 /* Turn on the PL08x */ 1910 /* Turn on the PL08x */
2010 pl08x_ensure_on(pl08x); 1911 pl08x_ensure_on(pl08x);
2011 1912
2012 /* 1913 /* Attach the interrupt handler */
2013 * Attach the interrupt handler
2014 */
2015 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1914 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2016 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 1915 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2017 1916
2018 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 1917 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
2019 vd->name, pl08x); 1918 DRIVER_NAME, pl08x);
2020 if (ret) { 1919 if (ret) {
2021 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 1920 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2022 __func__, adev->irq[0]); 1921 __func__, adev->irq[0]);
@@ -2087,8 +1986,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
2087 1986
2088 amba_set_drvdata(adev, pl08x); 1987 amba_set_drvdata(adev, pl08x);
2089 init_pl08x_debugfs(pl08x); 1988 init_pl08x_debugfs(pl08x);
2090 dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", 1989 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2091 vd->name, adev->res.start); 1990 amba_part(adev), amba_rev(adev),
1991 (unsigned long long)adev->res.start, adev->irq[0]);
2092 return 0; 1992 return 0;
2093 1993
2094out_no_slave_reg: 1994out_no_slave_reg:
@@ -2115,13 +2015,11 @@ out_no_pl08x:
2115 2015
2116/* PL080 has 8 channels and the PL081 has just 2 */ 2016/* PL080 has 8 channels and the PL081 has just 2 */
2117static struct vendor_data vendor_pl080 = { 2017static struct vendor_data vendor_pl080 = {
2118 .name = "PL080",
2119 .channels = 8, 2018 .channels = 8,
2120 .dualmaster = true, 2019 .dualmaster = true,
2121}; 2020};
2122 2021
2123static struct vendor_data vendor_pl081 = { 2022static struct vendor_data vendor_pl081 = {
2124 .name = "PL081",
2125 .channels = 2, 2023 .channels = 2,
2126 .dualmaster = false, 2024 .dualmaster = false,
2127}; 2025};
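These vendor structs reach pl08x_probe() as id->data through the AMBA ID table; a sketch of the matching entries using the usual PrimeCell part numbers (the ID and mask values are assumptions, check the shipped table):

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{ .id = 0x00041080, .mask = 0x000fffff, .data = &vendor_pl080 },
	/* PL081 */
	{ .id = 0x00041081, .mask = 0x000fffff, .data = &vendor_pl081 },
	{ 0, 0 },
};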
@@ -2160,7 +2058,7 @@ static int __init pl08x_init(void)
2160 retval = amba_driver_register(&pl08x_amba_driver); 2058 retval = amba_driver_register(&pl08x_amba_driver);
2161 if (retval) 2059 if (retval)
2162 printk(KERN_WARNING DRIVER_NAME 2060 printk(KERN_WARNING DRIVER_NAME
2163 "failed to register as an amba device (%d)\n", 2061 "failed to register as an AMBA device (%d)\n",
2164 retval); 2062 retval);
2165 return retval; 2063 return retval;
2166} 2064}