Diffstat (limited to 'drivers/dma/amba-pl08x.c')
-rw-r--r--  drivers/dma/amba-pl08x.c  |  501
1 file changed, 363 insertions(+), 138 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bff41d4848e5..fce46c5bf1c7 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
24 * 24 *
25 * Documentation: ARM DDI 0196G == PL080 25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081 26 * Documentation: ARM DDI 0218E == PL081
27 * Documentation: S3C6410 User's Manual == PL080S
27 * 28 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any 29 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
29 * channel. 30 * channel.
@@ -36,6 +37,14 @@
36 * 37 *
37 * The PL080 has a dual bus master, PL081 has a single master. 38 * The PL080 has a dual bus master, PL081 has a single master.
38 * 39 *
40 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
41 * It differs in the following aspects:
42 * - CH_CONFIG register at different offset,
43 * - separate CH_CONTROL2 register for transfer size,
44 * - bigger maximum transfer size,
45 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
46 * - no support for peripheral flow control.
47 *
39 * Memory to peripheral transfer may be visualized as 48 * Memory to peripheral transfer may be visualized as
40 * Get data from memory to DMAC 49 * Get data from memory to DMAC
41 * Until no data left 50 * Until no data left
@@ -64,10 +73,7 @@
64 * - Peripheral flow control: the transfer size is ignored (and should be 73 * - Peripheral flow control: the transfer size is ignored (and should be
65 * zero). The data is transferred from the current LLI entry, until 74 * zero). The data is transferred from the current LLI entry, until
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 75 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry. 76 * will then move to the next LLI entry. Unsupported by PL080S.
68 *
69 * Global TODO:
70 * - Break out common code from arch/arm/mach-s3c64xx and share
71 */ 77 */
72#include <linux/amba/bus.h> 78#include <linux/amba/bus.h>
73#include <linux/amba/pl08x.h> 79#include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@ struct pl08x_driver_data;
100 * @nomadik: whether the channels have Nomadik security extension bits 106 * @nomadik: whether the channels have Nomadik security extension bits
101 * that need to be checked for permission before use and some registers are 107 * that need to be checked for permission before use and some registers are
102 * missing 108 * missing
109 * @pl080s: whether this version is a PL080S, which has separate register and
110 * LLI word for transfer size.
103 */ 111 */
104struct vendor_data { 112struct vendor_data {
113 u8 config_offset;
105 u8 channels; 114 u8 channels;
106 bool dualmaster; 115 bool dualmaster;
107 bool nomadik; 116 bool nomadik;
108}; 117 bool pl080s;
109 118 u32 max_transfer_size;
110/*
111 * PL08X private data structures
112 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
113 * start & end do not - their bus bit info is in cctl. Also note that these
114 * are fixed 32-bit quantities.
115 */
116struct pl08x_lli {
117 u32 src;
118 u32 dst;
119 u32 lli;
120 u32 cctl;
121}; 119};
122 120
123/** 121/**
@@ -147,6 +145,7 @@ struct pl08x_bus_data {
147struct pl08x_phy_chan { 145struct pl08x_phy_chan {
148 unsigned int id; 146 unsigned int id;
149 void __iomem *base; 147 void __iomem *base;
148 void __iomem *reg_config;
150 spinlock_t lock; 149 spinlock_t lock;
151 struct pl08x_dma_chan *serving; 150 struct pl08x_dma_chan *serving;
152 bool locked; 151 bool locked;
@@ -176,12 +175,13 @@ struct pl08x_sg {
176 * @ccfg: config reg values for current txd 175 * @ccfg: config reg values for current txd
177 * @done: this marks completed descriptors, which should not have their 176 * @done: this marks completed descriptors, which should not have their
178 * mux released. 177 * mux released.
178 * @cyclic: indicate cyclic transfers
179 */ 179 */
180struct pl08x_txd { 180struct pl08x_txd {
181 struct virt_dma_desc vd; 181 struct virt_dma_desc vd;
182 struct list_head dsg_list; 182 struct list_head dsg_list;
183 dma_addr_t llis_bus; 183 dma_addr_t llis_bus;
184 struct pl08x_lli *llis_va; 184 u32 *llis_va;
185 /* Default cctl value for LLIs */ 185 /* Default cctl value for LLIs */
186 u32 cctl; 186 u32 cctl;
187 /* 187 /*
@@ -190,6 +190,7 @@ struct pl08x_txd {
190 */ 190 */
191 u32 ccfg; 191 u32 ccfg;
192 bool done; 192 bool done;
193 bool cyclic;
193}; 194};
194 195
195/** 196/**
@@ -265,17 +266,29 @@ struct pl08x_driver_data {
265 struct dma_pool *pool; 266 struct dma_pool *pool;
266 u8 lli_buses; 267 u8 lli_buses;
267 u8 mem_buses; 268 u8 mem_buses;
269 u8 lli_words;
268}; 270};
269 271
270/* 272/*
271 * PL08X specific defines 273 * PL08X specific defines
272 */ 274 */
273 275
274/* Size (bytes) of each LLI buffer allocated for one transfer */ 276/* The order of words in an LLI. */
275# define PL08X_LLI_TSFR_SIZE 0x2000 277#define PL080_LLI_SRC 0
278#define PL080_LLI_DST 1
279#define PL080_LLI_LLI 2
280#define PL080_LLI_CCTL 3
281#define PL080S_LLI_CCTL2 4
282
283/* Total words in an LLI. */
284#define PL080_LLI_WORDS 4
285#define PL080S_LLI_WORDS 8
276 286
277/* Maximum times we call dma_pool_alloc on this pool without freeing */ 287/*
278#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) 288 * Number of LLIs in each LLI buffer allocated for one transfer
289 * (maximum times we call dma_pool_alloc on this pool without freeing)
290 */
291#define MAX_NUM_TSFR_LLIS 512
279#define PL08X_ALIGN 8 292#define PL08X_ALIGN 8
280 293
281static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 294static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -336,10 +349,39 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
336{ 349{
337 unsigned int val; 350 unsigned int val;
338 351
339 val = readl(ch->base + PL080_CH_CONFIG); 352 val = readl(ch->reg_config);
340 return val & PL080_CONFIG_ACTIVE; 353 return val & PL080_CONFIG_ACTIVE;
341} 354}
342 355
356static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
357 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
358{
359 if (pl08x->vd->pl080s)
360 dev_vdbg(&pl08x->adev->dev,
361 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
362 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
363 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
364 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
365 lli[PL080S_LLI_CCTL2], ccfg);
366 else
367 dev_vdbg(&pl08x->adev->dev,
368 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
369 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
370 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
371 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
372
373 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
374 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
375 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
376 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
377
378 if (pl08x->vd->pl080s)
379 writel_relaxed(lli[PL080S_LLI_CCTL2],
380 phychan->base + PL080S_CH_CONTROL2);
381
382 writel(ccfg, phychan->reg_config);
383}
384
343/* 385/*
344 * Set the initial DMA register values i.e. those for the first LLI 386 * Set the initial DMA register values i.e. those for the first LLI
345 * The next LLI pointer and the configuration interrupt bit have 387 * The next LLI pointer and the configuration interrupt bit have
@@ -352,7 +394,6 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
352 struct pl08x_phy_chan *phychan = plchan->phychan; 394 struct pl08x_phy_chan *phychan = plchan->phychan;
353 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); 395 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
354 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 396 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
355 struct pl08x_lli *lli;
356 u32 val; 397 u32 val;
357 398
358 list_del(&txd->vd.node); 399 list_del(&txd->vd.node);
@@ -363,19 +404,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
363 while (pl08x_phy_channel_busy(phychan)) 404 while (pl08x_phy_channel_busy(phychan))
364 cpu_relax(); 405 cpu_relax();
365 406
366 lli = &txd->llis_va[0]; 407 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
367
368 dev_vdbg(&pl08x->adev->dev,
369 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
370 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
371 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
372 txd->ccfg);
373
374 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
375 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
376 writel(lli->lli, phychan->base + PL080_CH_LLI);
377 writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
378 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
379 408
380 /* Enable the DMA channel */ 409 /* Enable the DMA channel */
381 /* Do not access config register until channel shows as disabled */ 410 /* Do not access config register until channel shows as disabled */
@@ -383,11 +412,11 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
383 cpu_relax(); 412 cpu_relax();
384 413
385 /* Do not access config register until channel shows as inactive */ 414 /* Do not access config register until channel shows as inactive */
386 val = readl(phychan->base + PL080_CH_CONFIG); 415 val = readl(phychan->reg_config);
387 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 416 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
388 val = readl(phychan->base + PL080_CH_CONFIG); 417 val = readl(phychan->reg_config);
389 418
390 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); 419 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
391} 420}
392 421
393/* 422/*
@@ -406,9 +435,9 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
406 int timeout; 435 int timeout;
407 436
408 /* Set the HALT bit and wait for the FIFO to drain */ 437 /* Set the HALT bit and wait for the FIFO to drain */
409 val = readl(ch->base + PL080_CH_CONFIG); 438 val = readl(ch->reg_config);
410 val |= PL080_CONFIG_HALT; 439 val |= PL080_CONFIG_HALT;
411 writel(val, ch->base + PL080_CH_CONFIG); 440 writel(val, ch->reg_config);
412 441
413 /* Wait for channel inactive */ 442 /* Wait for channel inactive */
414 for (timeout = 1000; timeout; timeout--) { 443 for (timeout = 1000; timeout; timeout--) {
@@ -425,9 +454,9 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
425 u32 val; 454 u32 val;
426 455
427 /* Clear the HALT bit */ 456 /* Clear the HALT bit */
428 val = readl(ch->base + PL080_CH_CONFIG); 457 val = readl(ch->reg_config);
429 val &= ~PL080_CONFIG_HALT; 458 val &= ~PL080_CONFIG_HALT;
430 writel(val, ch->base + PL080_CH_CONFIG); 459 writel(val, ch->reg_config);
431} 460}
432 461
433/* 462/*
@@ -439,12 +468,12 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
439static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 468static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
440 struct pl08x_phy_chan *ch) 469 struct pl08x_phy_chan *ch)
441{ 470{
442 u32 val = readl(ch->base + PL080_CH_CONFIG); 471 u32 val = readl(ch->reg_config);
443 472
444 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 473 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
445 PL080_CONFIG_TC_IRQ_MASK); 474 PL080_CONFIG_TC_IRQ_MASK);
446 475
447 writel(val, ch->base + PL080_CH_CONFIG); 476 writel(val, ch->reg_config);
448 477
449 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); 478 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
450 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); 479 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -455,6 +484,28 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
455 /* The source width defines the number of bytes */ 484 /* The source width defines the number of bytes */
456 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 485 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
457 486
487 cctl &= PL080_CONTROL_SWIDTH_MASK;
488
489 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
490 case PL080_WIDTH_8BIT:
491 break;
492 case PL080_WIDTH_16BIT:
493 bytes *= 2;
494 break;
495 case PL080_WIDTH_32BIT:
496 bytes *= 4;
497 break;
498 }
499 return bytes;
500}
501
502static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
503{
504 /* The source width defines the number of bytes */
505 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
506
507 cctl &= PL080_CONTROL_SWIDTH_MASK;
508
458 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 509 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
459 case PL080_WIDTH_8BIT: 510 case PL080_WIDTH_8BIT:
460 break; 511 break;
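
Both helpers implement the same rule: the transfer-size field counts transfers of the source width, so the remaining byte count is that field multiplied by the width in bytes. The PL080S variant only differs in reading the field from the separate CH_CONTROL2 word, which is what allows its larger maximum. A standalone sketch of the arithmetic with assumed example values:

	#include <stdint.h>
	#include <stdio.h>

	/* Remaining bytes = size field (in source-width units) * width in bytes. */
	static uint32_t bytes_left(uint32_t size_units, unsigned int swidth_bytes)
	{
		return size_units * swidth_bytes;
	}

	int main(void)
	{
		/* e.g. 0x200 units at 32-bit source width -> 0x800 bytes remaining */
		printf("0x%x\n", bytes_left(0x200, 4));
		return 0;
	}
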
@@ -471,47 +522,66 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
471/* The channel should be paused when calling this */ 522/* The channel should be paused when calling this */
472static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 523static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
473{ 524{
525 struct pl08x_driver_data *pl08x = plchan->host;
526 const u32 *llis_va, *llis_va_limit;
474 struct pl08x_phy_chan *ch; 527 struct pl08x_phy_chan *ch;
528 dma_addr_t llis_bus;
475 struct pl08x_txd *txd; 529 struct pl08x_txd *txd;
476 size_t bytes = 0; 530 u32 llis_max_words;
531 size_t bytes;
532 u32 clli;
477 533
478 ch = plchan->phychan; 534 ch = plchan->phychan;
479 txd = plchan->at; 535 txd = plchan->at;
480 536
537 if (!ch || !txd)
538 return 0;
539
481 /* 540 /*
482 * Follow the LLIs to get the number of remaining 541 * Follow the LLIs to get the number of remaining
483 * bytes in the currently active transaction. 542 * bytes in the currently active transaction.
484 */ 543 */
485 if (ch && txd) { 544 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
486 u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
487 545
488 /* First get the remaining bytes in the active transfer */ 546 /* First get the remaining bytes in the active transfer */
547 if (pl08x->vd->pl080s)
548 bytes = get_bytes_in_cctl_pl080s(
549 readl(ch->base + PL080_CH_CONTROL),
550 readl(ch->base + PL080S_CH_CONTROL2));
551 else
489 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 552 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
490 553
491 if (clli) { 554 if (!clli)
492 struct pl08x_lli *llis_va = txd->llis_va; 555 return bytes;
493 dma_addr_t llis_bus = txd->llis_bus;
494 int index;
495 556
496 BUG_ON(clli < llis_bus || clli >= llis_bus + 557 llis_va = txd->llis_va;
497 sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS); 558 llis_bus = txd->llis_bus;
498 559
499 /* 560 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
500 * Locate the next LLI - as this is an array, 561 BUG_ON(clli < llis_bus || clli >= llis_bus +
501 * it's simple maths to find. 562 sizeof(u32) * llis_max_words);
502 */
503 index = (clli - llis_bus) / sizeof(struct pl08x_lli);
504 563
505 for (; index < MAX_NUM_TSFR_LLIS; index++) { 564 /*
506 bytes += get_bytes_in_cctl(llis_va[index].cctl); 565 * Locate the next LLI - as this is an array,
566 * it's simple maths to find.
567 */
568 llis_va += (clli - llis_bus) / sizeof(u32);
507 569
508 /* 570 llis_va_limit = llis_va + llis_max_words;
509 * A LLI pointer of 0 terminates the LLI list 571
510 */ 572 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
511 if (!llis_va[index].lli) 573 if (pl08x->vd->pl080s)
512 break; 574 bytes += get_bytes_in_cctl_pl080s(
513 } 575 llis_va[PL080_LLI_CCTL],
514 } 576 llis_va[PL080S_LLI_CCTL2]);
577 else
578 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
579
580 /*
581 * A LLI pointer going backward terminates the LLI list
582 */
583 if (llis_va[PL080_LLI_LLI] <= clli)
584 break;
515 } 585 }
516 586
517 return bytes; 587 return bytes;
@@ -722,6 +792,7 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
722 break; 792 break;
723 } 793 }
724 794
795 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
725 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 796 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
726 return retbits; 797 return retbits;
727} 798}
@@ -766,20 +837,26 @@ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
766/* 837/*
767 * Fills in one LLI for a certain transfer descriptor and advance the counter 838 * Fills in one LLI for a certain transfer descriptor and advance the counter
768 */ 839 */
769static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, 840static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
770 int num_llis, int len, u32 cctl) 841 struct pl08x_lli_build_data *bd,
842 int num_llis, int len, u32 cctl, u32 cctl2)
771{ 843{
772 struct pl08x_lli *llis_va = bd->txd->llis_va; 844 u32 offset = num_llis * pl08x->lli_words;
845 u32 *llis_va = bd->txd->llis_va + offset;
773 dma_addr_t llis_bus = bd->txd->llis_bus; 846 dma_addr_t llis_bus = bd->txd->llis_bus;
774 847
775 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 848 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
776 849
777 llis_va[num_llis].cctl = cctl; 850 /* Advance the offset to next LLI. */
778 llis_va[num_llis].src = bd->srcbus.addr; 851 offset += pl08x->lli_words;
779 llis_va[num_llis].dst = bd->dstbus.addr; 852
780 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * 853 llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
781 sizeof(struct pl08x_lli); 854 llis_va[PL080_LLI_DST] = bd->dstbus.addr;
782 llis_va[num_llis].lli |= bd->lli_bus; 855 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
856 llis_va[PL080_LLI_LLI] |= bd->lli_bus;
857 llis_va[PL080_LLI_CCTL] = cctl;
858 if (pl08x->vd->pl080s)
859 llis_va[PL080S_LLI_CCTL2] = cctl2;
783 860
784 if (cctl & PL080_CONTROL_SRC_INCR) 861 if (cctl & PL080_CONTROL_SRC_INCR)
785 bd->srcbus.addr += len; 862 bd->srcbus.addr += len;
@@ -791,14 +868,53 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
791 bd->remainder -= len; 868 bd->remainder -= len;
792} 869}
793 870
794static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, 871static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
795 u32 *cctl, u32 len, int num_llis, size_t *total_bytes) 872 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
873 int num_llis, size_t *total_bytes)
796{ 874{
797 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 875 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
798 pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); 876 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
799 (*total_bytes) += len; 877 (*total_bytes) += len;
800} 878}
801 879
880#ifdef VERBOSE_DEBUG
881static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
882 const u32 *llis_va, int num_llis)
883{
884 int i;
885
886 if (pl08x->vd->pl080s) {
887 dev_vdbg(&pl08x->adev->dev,
888 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
889 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
890 for (i = 0; i < num_llis; i++) {
891 dev_vdbg(&pl08x->adev->dev,
892 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
893 i, llis_va, llis_va[PL080_LLI_SRC],
894 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
895 llis_va[PL080_LLI_CCTL],
896 llis_va[PL080S_LLI_CCTL2]);
897 llis_va += pl08x->lli_words;
898 }
899 } else {
900 dev_vdbg(&pl08x->adev->dev,
901 "%-3s %-9s %-10s %-10s %-10s %s\n",
902 "lli", "", "csrc", "cdst", "clli", "cctl");
903 for (i = 0; i < num_llis; i++) {
904 dev_vdbg(&pl08x->adev->dev,
905 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
906 i, llis_va, llis_va[PL080_LLI_SRC],
907 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
908 llis_va[PL080_LLI_CCTL]);
909 llis_va += pl08x->lli_words;
910 }
911 }
912}
913#else
914static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
915 const u32 *llis_va, int num_llis) {}
916#endif
917
802/* 918/*
803 * This fills in the table of LLIs for the transfer descriptor 919 * This fills in the table of LLIs for the transfer descriptor
804 * Note that we assume we never have to change the burst sizes 920 * Note that we assume we never have to change the burst sizes
@@ -812,7 +928,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
812 int num_llis = 0; 928 int num_llis = 0;
813 u32 cctl, early_bytes = 0; 929 u32 cctl, early_bytes = 0;
814 size_t max_bytes_per_lli, total_bytes; 930 size_t max_bytes_per_lli, total_bytes;
815 struct pl08x_lli *llis_va; 931 u32 *llis_va, *last_lli;
816 struct pl08x_sg *dsg; 932 struct pl08x_sg *dsg;
817 933
818 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 934 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -902,7 +1018,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
902 1018
903 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1019 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
904 bd.dstbus.buswidth, 0); 1020 bd.dstbus.buswidth, 0);
905 pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); 1021 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
1022 0, cctl, 0);
906 break; 1023 break;
907 } 1024 }
908 1025
@@ -924,8 +1041,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
924 dev_vdbg(&pl08x->adev->dev, 1041 dev_vdbg(&pl08x->adev->dev,
925 "%s byte width LLIs (remain 0x%08x)\n", 1042 "%s byte width LLIs (remain 0x%08x)\n",
926 __func__, bd.remainder); 1043 __func__, bd.remainder);
927 prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, 1044 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
928 &total_bytes); 1045 num_llis++, &total_bytes);
929 } 1046 }
930 1047
931 if (bd.remainder) { 1048 if (bd.remainder) {
@@ -946,7 +1063,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
946 * MIN(buswidths) 1063 * MIN(buswidths)
947 */ 1064 */
948 max_bytes_per_lli = bd.srcbus.buswidth * 1065 max_bytes_per_lli = bd.srcbus.buswidth *
949 PL080_CONTROL_TRANSFER_SIZE_MASK; 1066 pl08x->vd->max_transfer_size;
950 dev_vdbg(&pl08x->adev->dev, 1067 dev_vdbg(&pl08x->adev->dev,
951 "%s max bytes per lli = %zu\n", 1068 "%s max bytes per lli = %zu\n",
952 __func__, max_bytes_per_lli); 1069 __func__, max_bytes_per_lli);
@@ -981,8 +1098,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
981 1098
982 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1099 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
983 bd.dstbus.buswidth, tsize); 1100 bd.dstbus.buswidth, tsize);
984 pl08x_fill_lli_for_desc(&bd, num_llis++, 1101 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
985 lli_len, cctl); 1102 lli_len, cctl, tsize);
986 total_bytes += lli_len; 1103 total_bytes += lli_len;
987 } 1104 }
988 1105
@@ -993,8 +1110,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
993 dev_vdbg(&pl08x->adev->dev, 1110 dev_vdbg(&pl08x->adev->dev,
994 "%s align with boundary, send odd bytes (remain %zu)\n", 1111 "%s align with boundary, send odd bytes (remain %zu)\n",
995 __func__, bd.remainder); 1112 __func__, bd.remainder);
996 prep_byte_width_lli(&bd, &cctl, bd.remainder, 1113 prep_byte_width_lli(pl08x, &bd, &cctl,
997 num_llis++, &total_bytes); 1114 bd.remainder, num_llis++, &total_bytes);
998 } 1115 }
999 } 1116 }
1000 1117
@@ -1008,33 +1125,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
1008 if (num_llis >= MAX_NUM_TSFR_LLIS) { 1125 if (num_llis >= MAX_NUM_TSFR_LLIS) {
1009 dev_err(&pl08x->adev->dev, 1126 dev_err(&pl08x->adev->dev,
1010 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 1127 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
1011 __func__, (u32) MAX_NUM_TSFR_LLIS); 1128 __func__, MAX_NUM_TSFR_LLIS);
1012 return 0; 1129 return 0;
1013 } 1130 }
1014 } 1131 }
1015 1132
1016 llis_va = txd->llis_va; 1133 llis_va = txd->llis_va;
1017 /* The final LLI terminates the LLI. */ 1134 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
1018 llis_va[num_llis - 1].lli = 0;
1019 /* The final LLI element shall also fire an interrupt. */
1020 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
1021
1022#ifdef VERBOSE_DEBUG
1023 {
1024 int i;
1025 1135
1026 dev_vdbg(&pl08x->adev->dev, 1136 if (txd->cyclic) {
1027 "%-3s %-9s %-10s %-10s %-10s %s\n", 1137 /* Link back to the first LLI. */
1028 "lli", "", "csrc", "cdst", "clli", "cctl"); 1138 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
1029 for (i = 0; i < num_llis; i++) { 1139 } else {
1030 dev_vdbg(&pl08x->adev->dev, 1140 /* The final LLI terminates the LLI. */
1031 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 1141 last_lli[PL080_LLI_LLI] = 0;
1032 i, &llis_va[i], llis_va[i].src, 1142 /* The final LLI element shall also fire an interrupt. */
1033 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl 1143 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
1034 );
1035 }
1036 } 1144 }
1037#endif 1145
1146 pl08x_dump_lli(pl08x, llis_va, num_llis);
1038 1147
1039 return num_llis; 1148 return num_llis;
1040} 1149}
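
The terminal-LLI handling above is what makes cyclic mode work: a chained transfer ends with a zero next pointer plus a terminal-count interrupt, while a cyclic transfer links the last entry back to the first so the controller reloads it indefinitely. A condensed restatement of the two cases, using the word indices defined earlier; last, llis_bus, lli_bus and cyclic stand in for the values computed in pl08x_fill_llis_for_desc():

	if (cyclic) {
		/* Ring: the hardware fetches the first LLI again after the last. */
		last[PL080_LLI_LLI] = llis_bus | lli_bus;
	} else {
		/* Chain: a zero next pointer stops the channel after this LLI... */
		last[PL080_LLI_LLI] = 0;
		/* ...and the final element raises the terminal-count interrupt. */
		last[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
	}
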
@@ -1310,6 +1419,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1310 struct dma_slave_config *config) 1419 struct dma_slave_config *config)
1311{ 1420{
1312 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1421 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1422 struct pl08x_driver_data *pl08x = plchan->host;
1313 1423
1314 if (!plchan->slave) 1424 if (!plchan->slave)
1315 return -EINVAL; 1425 return -EINVAL;
@@ -1319,6 +1429,13 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1319 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 1429 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1320 return -EINVAL; 1430 return -EINVAL;
1321 1431
1432 if (config->device_fc && pl08x->vd->pl080s) {
1433 dev_err(&pl08x->adev->dev,
1434 "%s: PL080S does not support peripheral flow control\n",
1435 __func__);
1436 return -EINVAL;
1437 }
1438
1322 plchan->cfg = *config; 1439 plchan->cfg = *config;
1323 1440
1324 return 0; 1441 return 0;
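
Because the PL080S has no peripheral flow control, the check above rejects slave configurations that request it. A hypothetical client-side configuration this driver would accept; chan, dev, fifo_phys and the chosen widths are assumptions for illustration:

	struct dma_slave_config cfg = {
		.dst_addr	= fifo_phys,	/* assumed peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
		.device_fc	= false,	/* DMAC flow control only on PL080S */
	};

	if (dmaengine_slave_config(chan, &cfg))
		dev_err(dev, "slave config rejected\n");
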
@@ -1409,25 +1526,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1409 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1526 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1410} 1527}
1411 1528
1412static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1529static struct pl08x_txd *pl08x_init_txd(
1413 struct dma_chan *chan, struct scatterlist *sgl, 1530 struct dma_chan *chan,
1414 unsigned int sg_len, enum dma_transfer_direction direction, 1531 enum dma_transfer_direction direction,
1415 unsigned long flags, void *context) 1532 dma_addr_t *slave_addr)
1416{ 1533{
1417 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1534 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1418 struct pl08x_driver_data *pl08x = plchan->host; 1535 struct pl08x_driver_data *pl08x = plchan->host;
1419 struct pl08x_txd *txd; 1536 struct pl08x_txd *txd;
1420 struct pl08x_sg *dsg;
1421 struct scatterlist *sg;
1422 enum dma_slave_buswidth addr_width; 1537 enum dma_slave_buswidth addr_width;
1423 dma_addr_t slave_addr;
1424 int ret, tmp; 1538 int ret, tmp;
1425 u8 src_buses, dst_buses; 1539 u8 src_buses, dst_buses;
1426 u32 maxburst, cctl; 1540 u32 maxburst, cctl;
1427 1541
1428 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1429 __func__, sg_dma_len(sgl), plchan->name);
1430
1431 txd = pl08x_get_txd(plchan); 1542 txd = pl08x_get_txd(plchan);
1432 if (!txd) { 1543 if (!txd) {
1433 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1544 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1441,14 +1552,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1441 */ 1552 */
1442 if (direction == DMA_MEM_TO_DEV) { 1553 if (direction == DMA_MEM_TO_DEV) {
1443 cctl = PL080_CONTROL_SRC_INCR; 1554 cctl = PL080_CONTROL_SRC_INCR;
1444 slave_addr = plchan->cfg.dst_addr; 1555 *slave_addr = plchan->cfg.dst_addr;
1445 addr_width = plchan->cfg.dst_addr_width; 1556 addr_width = plchan->cfg.dst_addr_width;
1446 maxburst = plchan->cfg.dst_maxburst; 1557 maxburst = plchan->cfg.dst_maxburst;
1447 src_buses = pl08x->mem_buses; 1558 src_buses = pl08x->mem_buses;
1448 dst_buses = plchan->cd->periph_buses; 1559 dst_buses = plchan->cd->periph_buses;
1449 } else if (direction == DMA_DEV_TO_MEM) { 1560 } else if (direction == DMA_DEV_TO_MEM) {
1450 cctl = PL080_CONTROL_DST_INCR; 1561 cctl = PL080_CONTROL_DST_INCR;
1451 slave_addr = plchan->cfg.src_addr; 1562 *slave_addr = plchan->cfg.src_addr;
1452 addr_width = plchan->cfg.src_addr_width; 1563 addr_width = plchan->cfg.src_addr_width;
1453 maxburst = plchan->cfg.src_maxburst; 1564 maxburst = plchan->cfg.src_maxburst;
1454 src_buses = plchan->cd->periph_buses; 1565 src_buses = plchan->cd->periph_buses;
@@ -1497,24 +1608,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1497 else 1608 else
1498 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 1609 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1499 1610
1611 return txd;
1612}
1613
1614static int pl08x_tx_add_sg(struct pl08x_txd *txd,
1615 enum dma_transfer_direction direction,
1616 dma_addr_t slave_addr,
1617 dma_addr_t buf_addr,
1618 unsigned int len)
1619{
1620 struct pl08x_sg *dsg;
1621
1622 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1623 if (!dsg)
1624 return -ENOMEM;
1625
1626 list_add_tail(&dsg->node, &txd->dsg_list);
1627
1628 dsg->len = len;
1629 if (direction == DMA_MEM_TO_DEV) {
1630 dsg->src_addr = buf_addr;
1631 dsg->dst_addr = slave_addr;
1632 } else {
1633 dsg->src_addr = slave_addr;
1634 dsg->dst_addr = buf_addr;
1635 }
1636
1637 return 0;
1638}
1639
1640static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1641 struct dma_chan *chan, struct scatterlist *sgl,
1642 unsigned int sg_len, enum dma_transfer_direction direction,
1643 unsigned long flags, void *context)
1644{
1645 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1646 struct pl08x_driver_data *pl08x = plchan->host;
1647 struct pl08x_txd *txd;
1648 struct scatterlist *sg;
1649 int ret, tmp;
1650 dma_addr_t slave_addr;
1651
1652 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1653 __func__, sg_dma_len(sgl), plchan->name);
1654
1655 txd = pl08x_init_txd(chan, direction, &slave_addr);
1656 if (!txd)
1657 return NULL;
1658
1500 for_each_sg(sgl, sg, sg_len, tmp) { 1659 for_each_sg(sgl, sg, sg_len, tmp) {
1501 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1660 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1502 if (!dsg) { 1661 sg_dma_address(sg),
1662 sg_dma_len(sg));
1663 if (ret) {
1503 pl08x_release_mux(plchan); 1664 pl08x_release_mux(plchan);
1504 pl08x_free_txd(pl08x, txd); 1665 pl08x_free_txd(pl08x, txd);
1505 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1666 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1506 __func__); 1667 __func__);
1507 return NULL; 1668 return NULL;
1508 } 1669 }
1509 list_add_tail(&dsg->node, &txd->dsg_list); 1670 }
1510 1671
1511 dsg->len = sg_dma_len(sg); 1672 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1512 if (direction == DMA_MEM_TO_DEV) { 1673 if (!ret) {
1513 dsg->src_addr = sg_dma_address(sg); 1674 pl08x_release_mux(plchan);
1514 dsg->dst_addr = slave_addr; 1675 pl08x_free_txd(pl08x, txd);
1515 } else { 1676 return NULL;
1516 dsg->src_addr = slave_addr; 1677 }
1517 dsg->dst_addr = sg_dma_address(sg); 1678
1679 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1680}
1681
1682static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1683 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1684 size_t period_len, enum dma_transfer_direction direction,
1685 unsigned long flags, void *context)
1686{
1687 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1688 struct pl08x_driver_data *pl08x = plchan->host;
1689 struct pl08x_txd *txd;
1690 int ret, tmp;
1691 dma_addr_t slave_addr;
1692
1693 dev_dbg(&pl08x->adev->dev,
1694 "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
1695 __func__, period_len, buf_len,
1696 direction == DMA_MEM_TO_DEV ? "to" : "from",
1697 plchan->name);
1698
1699 txd = pl08x_init_txd(chan, direction, &slave_addr);
1700 if (!txd)
1701 return NULL;
1702
1703 txd->cyclic = true;
1704 txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
1705 for (tmp = 0; tmp < buf_len; tmp += period_len) {
1706 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1707 buf_addr + tmp, period_len);
1708 if (ret) {
1709 pl08x_release_mux(plchan);
1710 pl08x_free_txd(pl08x, txd);
1711 return NULL;
1518 } 1712 }
1519 } 1713 }
1520 1714
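
pl08x_prep_dma_cyclic() slices the buffer into period_len chunks, adds one sg entry per period, and the cyclic flag later turns the LLI chain into a ring that raises a terminal-count interrupt per period. A hedged sketch of how a client (an audio or ADC driver, say) would drive this through the standard dmaengine API; chan, dev, buf_dma, buf_len, period_len and period_done are placeholders:

	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = period_done;		/* invoked once per completed period */
	desc->callback_param = dev;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* runs until dmaengine_terminate_all() */
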
@@ -1657,7 +1851,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1657 1851
1658 spin_lock(&plchan->vc.lock); 1852 spin_lock(&plchan->vc.lock);
1659 tx = plchan->at; 1853 tx = plchan->at;
1660 if (tx) { 1854 if (tx && tx->cyclic) {
1855 vchan_cyclic_callback(&tx->vd);
1856 } else if (tx) {
1661 plchan->at = NULL; 1857 plchan->at = NULL;
1662 /* 1858 /*
1663 * This descriptor is done, release its mux 1859 * This descriptor is done, release its mux
@@ -1851,6 +2047,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1851{ 2047{
1852 struct pl08x_driver_data *pl08x; 2048 struct pl08x_driver_data *pl08x;
1853 const struct vendor_data *vd = id->data; 2049 const struct vendor_data *vd = id->data;
2050 u32 tsfr_size;
1854 int ret = 0; 2051 int ret = 0;
1855 int i; 2052 int i;
1856 2053
@@ -1878,6 +2075,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1878 2075
1879 /* Initialize slave engine */ 2076 /* Initialize slave engine */
1880 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2077 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2078 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
1881 pl08x->slave.dev = &adev->dev; 2079 pl08x->slave.dev = &adev->dev;
1882 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; 2080 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1883 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 2081 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1885,6 +2083,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1885 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 2083 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1886 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2084 pl08x->slave.device_issue_pending = pl08x_issue_pending;
1887 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2085 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2086 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
1888 pl08x->slave.device_control = pl08x_control; 2087 pl08x->slave.device_control = pl08x_control;
1889 2088
1890 /* Get the platform data */ 2089 /* Get the platform data */
@@ -1907,9 +2106,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1907 pl08x->mem_buses = pl08x->pd->mem_buses; 2106 pl08x->mem_buses = pl08x->pd->mem_buses;
1908 } 2107 }
1909 2108
2109 if (vd->pl080s)
2110 pl08x->lli_words = PL080S_LLI_WORDS;
2111 else
2112 pl08x->lli_words = PL080_LLI_WORDS;
2113 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
2114
1910 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 2115 /* A DMA memory pool for LLIs, align on 1-byte boundary */
1911 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 2116 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1912 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 2117 tsfr_size, PL08X_ALIGN, 0);
1913 if (!pl08x->pool) { 2118 if (!pl08x->pool) {
1914 ret = -ENOMEM; 2119 ret = -ENOMEM;
1915 goto out_no_lli_pool; 2120 goto out_no_lli_pool;
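
With MAX_NUM_TSFR_LLIS fixed at 512, the pool buffer size now follows directly from the per-variant LLI word count. For illustration, the resulting per-transfer buffer sizes work out as in this small standalone program:

	#include <stdio.h>

	int main(void)
	{
		/* 512 LLIs x words per LLI x 4 bytes per word */
		printf("PL080:  %zu bytes\n", 512 * 4 * sizeof(unsigned int));	/* 8 KiB  */
		printf("PL080S: %zu bytes\n", 512 * 8 * sizeof(unsigned int));	/* 16 KiB */
		return 0;
	}
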
@@ -1952,6 +2157,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1952 2157
1953 ch->id = i; 2158 ch->id = i;
1954 ch->base = pl08x->base + PL080_Cx_BASE(i); 2159 ch->base = pl08x->base + PL080_Cx_BASE(i);
2160 ch->reg_config = ch->base + vd->config_offset;
1955 spin_lock_init(&ch->lock); 2161 spin_lock_init(&ch->lock);
1956 2162
1957 /* 2163 /*
@@ -1962,7 +2168,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1962 if (vd->nomadik) { 2168 if (vd->nomadik) {
1963 u32 val; 2169 u32 val;
1964 2170
1965 val = readl(ch->base + PL080_CH_CONFIG); 2171 val = readl(ch->reg_config);
1966 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { 2172 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
1967 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); 2173 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
1968 ch->locked = true; 2174 ch->locked = true;
@@ -2013,8 +2219,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2013 2219
2014 amba_set_drvdata(adev, pl08x); 2220 amba_set_drvdata(adev, pl08x);
2015 init_pl08x_debugfs(pl08x); 2221 init_pl08x_debugfs(pl08x);
2016 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 2222 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2017 amba_part(adev), amba_rev(adev), 2223 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
2018 (unsigned long long)adev->res.start, adev->irq[0]); 2224 (unsigned long long)adev->res.start, adev->irq[0]);
2019 2225
2020 return 0; 2226 return 0;
@@ -2043,22 +2249,41 @@ out_no_pl08x:
2043 2249
2044/* PL080 has 8 channels and the PL081 has just 2 */ 2250
2045static struct vendor_data vendor_pl080 = { 2251static struct vendor_data vendor_pl080 = {
2252 .config_offset = PL080_CH_CONFIG,
2046 .channels = 8, 2253 .channels = 8,
2047 .dualmaster = true, 2254 .dualmaster = true,
2255 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2048}; 2256};
2049 2257
2050static struct vendor_data vendor_nomadik = { 2258static struct vendor_data vendor_nomadik = {
2259 .config_offset = PL080_CH_CONFIG,
2051 .channels = 8, 2260 .channels = 8,
2052 .dualmaster = true, 2261 .dualmaster = true,
2053 .nomadik = true, 2262 .nomadik = true,
2263 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2264};
2265
2266static struct vendor_data vendor_pl080s = {
2267 .config_offset = PL080S_CH_CONFIG,
2268 .channels = 8,
2269 .pl080s = true,
2270 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
2054}; 2271};
2055 2272
2056static struct vendor_data vendor_pl081 = { 2273static struct vendor_data vendor_pl081 = {
2274 .config_offset = PL080_CH_CONFIG,
2057 .channels = 2, 2275 .channels = 2,
2058 .dualmaster = false, 2276 .dualmaster = false,
2277 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2059}; 2278};
2060 2279
2061static struct amba_id pl08x_ids[] = { 2280static struct amba_id pl08x_ids[] = {
2281 /* Samsung PL080S variant */
2282 {
2283 .id = 0x0a141080,
2284 .mask = 0xffffffff,
2285 .data = &vendor_pl080s,
2286 },
2062 /* PL080 */ 2287 /* PL080 */
2063 { 2288 {
2064 .id = 0x00041080, 2289 .id = 0x00041080,