path: root/drivers/dma
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-06-02 19:35:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-02 19:35:49 -0400
commit     a727eaf64ff084a50b983fc506810c7a576b7ce3 (patch)
tree       cb82642227ed590ebc43b12cfad285a2d7681d5d /drivers/dma
parent     755a9ba7bf24a45b6dbf8bb15a5a56c8ed12461a (diff)
parent     45e70b7d48d53d5eb193c6b3f012b31ca135fb4c (diff)
Merge tag 'drivers-for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc into next
Pull ARM SoC driver changes from Olof Johansson:
 "SoC-near driver changes that we're merging through our tree. Mostly
  because they depend on other changes we have staged, but in some cases
  because the driver maintainers preferred that we did it this way.

  This contains a largeish cleanup series of the omap_l3_noc bus driver,
  cpuidle rework for Exynos, some reset driver conversions and a long
  branch of TI EDMA fixes and cleanups, with more to come next release.
  The TI EDMA cleanups is a shared branch with the dmaengine tree, with a
  handful of Davinci-specific fixes on top.

  After discussion at last year's KS (and some more on the mailing
  lists), we are here adding a drivers/soc directory. The purpose of this
  is to keep per-vendor shared code that's needed by different drivers
  but that doesn't fit into the MFD (nor drivers/platform) model. We
  expect to keep merging contents for this hierarchy through arm-soc so
  we can keep an eye on what the vendors keep adding here and not making
  it a free-for-all to shove in crazy stuff"

* tag 'drivers-for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (101 commits)
  cpufreq: exynos: Fix driver compilation with ARCH_MULTIPLATFORM
  tty: serial: msm: Remove direct access to GSBI
  power: reset: keystone-reset: introduce keystone reset driver
  Documentation: dt: add bindings for keystone pll control controller
  Documentation: dt: add bindings for keystone reset driver
  soc: qcom: fix of_device_id table
  ARM: EXYNOS: Fix kernel panic when unplugging CPU1 on exynos
  ARM: EXYNOS: Move the driver to drivers/cpuidle directory
  ARM: EXYNOS: Cleanup all unneeded headers from cpuidle.c
  ARM: EXYNOS: Pass the AFTR callback to the platform_data
  ARM: EXYNOS: Move S5P_CHECK_SLEEP into pm.c
  ARM: EXYNOS: Move the power sequence call in the cpu_pm notifier
  ARM: EXYNOS: Move the AFTR state function into pm.c
  ARM: EXYNOS: Encapsulate the AFTR code into a function
  ARM: EXYNOS: Disable cpuidle for exynos5440
  ARM: EXYNOS: Encapsulate boot vector code into a function for cpuidle
  ARM: EXYNOS: Pass wakeup mask parameter to function for cpuidle
  ARM: EXYNOS: Remove ifdef for scu_enable in pm
  ARM: EXYNOS: Move scu_enable in the cpu_pm notifier
  ARM: EXYNOS: Use the cpu_pm notifier for pm
  ...
Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/edma.c   335
1 file changed, 260 insertions(+), 75 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 926360c2db6a..d08c4dedef35 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
 #define EDMA_MAX_SLOTS          MAX_NR_SG
 #define EDMA_DESCRIPTORS        16
 
+struct edma_pset {
+        u32                             len;
+        dma_addr_t                      addr;
+        struct edmacc_param             param;
+};
+
 struct edma_desc {
         struct virt_dma_desc            vdesc;
         struct list_head                node;
+        enum dma_transfer_direction     direction;
         int                             cyclic;
         int                             absync;
         int                             pset_nr;
+        struct edma_chan                *echan;
         int                             processed;
-        struct edmacc_param             pset[0];
+
+        /*
+         * The following 4 elements are used for residue accounting.
+         *
+         * - processed_stat: the number of SG elements we have traversed
+         * so far to cover accounting. This is updated directly to processed
+         * during edma_callback and is always <= processed, because processed
+         * refers to the number of pending transfer (programmed to EDMA
+         * controller), where as processed_stat tracks number of transfers
+         * accounted for so far.
+         *
+         * - residue: The amount of bytes we have left to transfer for this desc
+         *
+         * - residue_stat: The residue in bytes of data we have covered
+         * so far for accounting. This is updated directly to residue
+         * during callbacks to keep it current.
+         *
+         * - sg_len: Tracks the length of the current intermediate transfer,
+         * this is required to update the residue during intermediate transfer
+         * completion callback.
+         */
+        int                             processed_stat;
+        u32                             sg_len;
+        u32                             residue;
+        u32                             residue_stat;
+
+        struct edma_pset                pset[0];
 };
 
 struct edma_cc;
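To make the bookkeeping above concrete, here is a small worked timeline (illustration only, not part of the patch; the element count and sizes are made up) showing how the four fields evolve for a slave_sg descriptor, assuming a 6-element scatterlist of 1 KiB each that the driver happens to program in two batches of 3 psets:

        /*
         * Illustrative timeline, not driver code:
         *
         *   prep_slave_sg():    residue = residue_stat = 6144,
         *                       processed = processed_stat = 0
         *   edma_execute() #1:  processed = 3, sg_len = 3072
         *   intermediate IRQ:   residue -= sg_len          -> 3072
         *                       residue_stat = residue     -> 3072
         *                       processed_stat = processed -> 3
         *   edma_execute() #2:  processed = 6, sg_len = 3072
         *   completion IRQ:     residue = 0, descriptor completed
         *
         * Between interrupts, edma_residue() (added further down in this
         * patch) refines residue_stat per pset from the controller's
         * current position.
         */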
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
         /* Find out how many left */
         left = edesc->pset_nr - edesc->processed;
         nslots = min(MAX_NR_SG, left);
+        edesc->sg_len = 0;
 
         /* Write descriptor PaRAM set(s) */
         for (i = 0; i < nslots; i++) {
                 j = i + edesc->processed;
-                edma_write_slot(echan->slot[i], &edesc->pset[j]);
-                dev_dbg(echan->vchan.chan.device->dev,
+                edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+                edesc->sg_len += edesc->pset[j].len;
+                dev_vdbg(echan->vchan.chan.device->dev,
                         "\n pset[%d]:\n"
                         " chnum\t%d\n"
                         " slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
                         " cidx\t%08x\n"
                         " lkrld\t%08x\n",
                         j, echan->ch_num, echan->slot[i],
-                        edesc->pset[j].opt,
-                        edesc->pset[j].src,
-                        edesc->pset[j].dst,
-                        edesc->pset[j].a_b_cnt,
-                        edesc->pset[j].ccnt,
-                        edesc->pset[j].src_dst_bidx,
-                        edesc->pset[j].src_dst_cidx,
-                        edesc->pset[j].link_bcntrld);
+                        edesc->pset[j].param.opt,
+                        edesc->pset[j].param.src,
+                        edesc->pset[j].param.dst,
+                        edesc->pset[j].param.a_b_cnt,
+                        edesc->pset[j].param.ccnt,
+                        edesc->pset[j].param.src_dst_bidx,
+                        edesc->pset[j].param.src_dst_cidx,
+                        edesc->pset[j].param.link_bcntrld);
                 /* Link to the previous slot if not the last set */
                 if (i != (nslots - 1))
                         edma_link(echan->slot[i], echan->slot[i+1]);
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan)
         }
 
         if (edesc->processed <= MAX_NR_SG) {
-                dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+                dev_dbg(dev, "first transfer starting on channel %d\n",
+                        echan->ch_num);
                 edma_start(echan->ch_num);
         } else {
                 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
          * MAX_NR_SG
          */
         if (echan->missed) {
-                dev_dbg(dev, "missed event in execute detected\n");
+                dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
                 edma_clean_channel(echan->ch_num);
                 edma_stop(echan->ch_num);
                 edma_start(echan->ch_num);
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
         return 0;
 }
 
+static int edma_dma_pause(struct edma_chan *echan)
+{
+        /* Pause/Resume only allowed with cyclic mode */
+        if (!echan->edesc->cyclic)
+                return -EINVAL;
+
+        edma_pause(echan->ch_num);
+        return 0;
+}
+
+static int edma_dma_resume(struct edma_chan *echan)
+{
+        /* Pause/Resume only allowed with cyclic mode */
+        if (!echan->edesc->cyclic)
+                return -EINVAL;
+
+        edma_resume(echan->ch_num);
+        return 0;
+}
+
 static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                         unsigned long arg)
 {
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 config = (struct dma_slave_config *)arg;
                 ret = edma_slave_config(echan, config);
                 break;
+        case DMA_PAUSE:
+                ret = edma_dma_pause(echan);
+                break;
+
+        case DMA_RESUME:
+                ret = edma_dma_resume(echan);
+                break;
+
         default:
                 ret = -ENOSYS;
         }
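For context, a slave driver reaches these new cases through the generic dmaengine wrappers; a minimal consumer-side sketch follows (the function names and the surrounding stream context are hypothetical; dmaengine_pause(), dmaengine_resume() and dma_request_slave_channel() are existing API):

#include <linux/dmaengine.h>
#include <linux/printk.h>

/* Sketch: pause and resume an already-issued cyclic (e.g. audio) transfer. */
static void example_stream_suspend(struct dma_chan *chan)
{
        /* Routed through edma_control(DMA_PAUSE) to edma_dma_pause() above;
         * returns -EINVAL for non-cyclic descriptors. */
        if (dmaengine_pause(chan))
                pr_warn("DMA pause not supported on this channel\n");
}

static void example_stream_resume(struct dma_chan *chan)
{
        /* Routed through edma_control(DMA_RESUME) to edma_dma_resume() above */
        dmaengine_resume(chan);
}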
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  * @dma_length: Total length of the DMA transfer
  * @direction: Direction of the transfer
  */
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
         dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
         enum dma_slave_buswidth dev_width, unsigned int dma_length,
         enum dma_transfer_direction direction)
 {
         struct edma_chan *echan = to_edma_chan(chan);
         struct device *dev = chan->device->dev;
+        struct edmacc_param *param = &epset->param;
         int acnt, bcnt, ccnt, cidx;
         int src_bidx, dst_bidx, src_cidx, dst_cidx;
         int absync;
 
         acnt = dev_width;
+
+        /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+        if (!burst)
+                burst = 1;
         /*
          * If the maxburst is equal to the fifo width, use
          * A-synced transfers. This allows for large contiguous
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
                 cidx = acnt * bcnt;
         }
 
+        epset->len = dma_length;
+
         if (direction == DMA_MEM_TO_DEV) {
                 src_bidx = acnt;
                 src_cidx = cidx;
                 dst_bidx = 0;
                 dst_cidx = 0;
+                epset->addr = src_addr;
         } else if (direction == DMA_DEV_TO_MEM) {
                 src_bidx = 0;
                 src_cidx = 0;
                 dst_bidx = acnt;
                 dst_cidx = cidx;
+                epset->addr = dst_addr;
+        } else if (direction == DMA_MEM_TO_MEM) {
+                src_bidx = acnt;
+                src_cidx = cidx;
+                dst_bidx = acnt;
+                dst_cidx = cidx;
         } else {
                 dev_err(dev, "%s: direction not implemented yet\n", __func__);
                 return -EINVAL;
         }
 
-        pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+        param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
         /* Configure A or AB synchronized transfers */
         if (absync)
-                pset->opt |= SYNCDIM;
+                param->opt |= SYNCDIM;
 
-        pset->src = src_addr;
-        pset->dst = dst_addr;
+        param->src = src_addr;
+        param->dst = dst_addr;
 
-        pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
-        pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+        param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+        param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
 
-        pset->a_b_cnt = bcnt << 16 | acnt;
-        pset->ccnt = ccnt;
+        param->a_b_cnt = bcnt << 16 | acnt;
+        param->ccnt = ccnt;
         /*
          * Only time when (bcntrld) auto reload is required is for
          * A-sync case, and in this case, a requirement of reload value
          * of SZ_64K-1 only is assured. 'link' is initially set to NULL
          * and then later will be populated by edma_execute.
          */
-        pset->link_bcntrld = 0xffffffff;
+        param->link_bcntrld = 0xffffffff;
         return absync;
 }
 
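As a worked example of the parameter split this function performs (illustrative numbers; the burst/width clamping itself lives in unchanged code between these hunks): with dev_width = DMA_SLAVE_BUSWIDTH_4_BYTES, maxburst = 8 and dma_length = 4096, the AB-synced path programs acnt = 4, bcnt = 8 and ccnt = 4096 / (4 * 8) = 128, sets SYNCDIM, and uses cidx = acnt * bcnt = 32 so each C iteration advances by one whole 32-byte burst. With a maxburst of 0 or 1 (now normalized to 1 above) the A-synced path is taken instead and SYNCDIM stays clear.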
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                 dev_width = echan->cfg.dst_addr_width;
                 burst = echan->cfg.dst_maxburst;
         } else {
-                dev_err(dev, "%s: bad direction?\n", __func__);
+                dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
                 return NULL;
         }
 
         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-                dev_err(dev, "Undefined slave buswidth\n");
+                dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
                 return NULL;
         }
 
         edesc = kzalloc(sizeof(*edesc) + sg_len *
                 sizeof(edesc->pset[0]), GFP_ATOMIC);
         if (!edesc) {
-                dev_dbg(dev, "Failed to allocate a descriptor\n");
+                dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
                 return NULL;
         }
 
         edesc->pset_nr = sg_len;
+        edesc->residue = 0;
+        edesc->direction = direction;
+        edesc->echan = echan;
 
         /* Allocate a PaRAM slot, if needed */
         nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                                                 EDMA_SLOT_ANY);
                         if (echan->slot[i] < 0) {
                                 kfree(edesc);
-                                dev_err(dev, "Failed to allocate slot\n");
+                                dev_err(dev, "%s: Failed to allocate slot\n",
+                                        __func__);
                                 return NULL;
                         }
                 }
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                 }
 
                 edesc->absync = ret;
+                edesc->residue += sg_dma_len(sg);
 
                 /* If this is the last in a current SG set of transactions,
                    enable interrupts so that next set is processed */
                 if (!((i+1) % MAX_NR_SG))
-                        edesc->pset[i].opt |= TCINTEN;
+                        edesc->pset[i].param.opt |= TCINTEN;
 
                 /* If this is the last set, enable completion interrupt flag */
                 if (i == sg_len - 1)
-                        edesc->pset[i].opt |= TCINTEN;
+                        edesc->pset[i].param.opt |= TCINTEN;
         }
+        edesc->residue_stat = edesc->residue;
+
+        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+        size_t len, unsigned long tx_flags)
+{
+        int ret;
+        struct edma_desc *edesc;
+        struct device *dev = chan->device->dev;
+        struct edma_chan *echan = to_edma_chan(chan);
+
+        if (unlikely(!echan || !len))
+                return NULL;
+
+        edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+        if (!edesc) {
+                dev_dbg(dev, "Failed to allocate a descriptor\n");
+                return NULL;
+        }
+
+        edesc->pset_nr = 1;
+
+        ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+                               DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+        if (ret < 0)
+                return NULL;
+
+        edesc->absync = ret;
+
+        /*
+         * Enable intermediate transfer chaining to re-trigger channel
+         * on completion of every TR, and enable transfer-completion
+         * interrupt on completion of the whole transfer.
+         */
+        edesc->pset[0].param.opt |= ITCCHEN;
+        edesc->pset[0].param.opt |= TCINTEN;
 
         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
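The new callback is reached through the standard dmaengine memcpy flow; a minimal client-side sketch follows (illustration only: the function is hypothetical, the buffers are assumed to be already DMA-mapped and len to respect the 4-byte copy_align set later in this patch, and error handling is trimmed):

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Sketch: offload a copy to any DMA_MEMCPY-capable channel. */
static int example_dma_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        /* Ends up in edma_prep_dma_memcpy() when an EDMA channel is picked */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                   DMA_PREP_INTERRUPT);
        if (!tx) {
                dma_release_channel(chan);
                return -EIO;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        /* ... wait for completion, then dma_release_channel(chan) ... */
        return dma_submit_error(cookie);
}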
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                 dev_width = echan->cfg.dst_addr_width;
                 burst = echan->cfg.dst_maxburst;
         } else {
-                dev_err(dev, "%s: bad direction?\n", __func__);
+                dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
                 return NULL;
         }
 
         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-                dev_err(dev, "Undefined slave buswidth\n");
+                dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
                 return NULL;
         }
 
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
         edesc = kzalloc(sizeof(*edesc) + nslots *
                 sizeof(edesc->pset[0]), GFP_ATOMIC);
         if (!edesc) {
-                dev_dbg(dev, "Failed to allocate a descriptor\n");
+                dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
                 return NULL;
         }
 
         edesc->cyclic = 1;
         edesc->pset_nr = nslots;
+        edesc->residue = edesc->residue_stat = buf_len;
+        edesc->direction = direction;
+        edesc->echan = echan;
 
-        dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
-        dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
-        dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+        dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+                __func__, echan->ch_num, nslots, period_len, buf_len);
 
         for (i = 0; i < nslots; i++) {
                 /* Allocate a PaRAM slot, if needed */
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                                                 EDMA_SLOT_ANY);
                         if (echan->slot[i] < 0) {
                                 kfree(edesc);
-                                dev_err(dev, "Failed to allocate slot\n");
+                                dev_err(dev, "%s: Failed to allocate slot\n",
+                                        __func__);
                                 return NULL;
                         }
                 }
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                 else
                         src_addr += period_len;
 
-                dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
-                dev_dbg(dev,
+                dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+                dev_vdbg(dev,
                         "\n pset[%d]:\n"
                         " chnum\t%d\n"
                         " slot\t%d\n"
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                         " cidx\t%08x\n"
                         " lkrld\t%08x\n",
                         i, echan->ch_num, echan->slot[i],
-                        edesc->pset[i].opt,
-                        edesc->pset[i].src,
-                        edesc->pset[i].dst,
-                        edesc->pset[i].a_b_cnt,
-                        edesc->pset[i].ccnt,
-                        edesc->pset[i].src_dst_bidx,
-                        edesc->pset[i].src_dst_cidx,
-                        edesc->pset[i].link_bcntrld);
+                        edesc->pset[i].param.opt,
+                        edesc->pset[i].param.src,
+                        edesc->pset[i].param.dst,
+                        edesc->pset[i].param.a_b_cnt,
+                        edesc->pset[i].param.ccnt,
+                        edesc->pset[i].param.src_dst_bidx,
+                        edesc->pset[i].param.src_dst_cidx,
+                        edesc->pset[i].param.link_bcntrld);
 
                 edesc->absync = ret;
 
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                  * Enable interrupts for every period because callback
                  * has to be called for every period.
                  */
-                edesc->pset[i].opt |= TCINTEN;
+                edesc->pset[i].param.opt |= TCINTEN;
         }
 
         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
         struct edma_chan *echan = data;
         struct device *dev = echan->vchan.chan.device->dev;
         struct edma_desc *edesc;
-        unsigned long flags;
         struct edmacc_param p;
 
         edesc = echan->edesc;
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 
         switch (ch_status) {
         case EDMA_DMA_COMPLETE:
-                spin_lock_irqsave(&echan->vchan.lock, flags);
+                spin_lock(&echan->vchan.lock);
 
                 if (edesc) {
                         if (edesc->cyclic) {
                                 vchan_cyclic_callback(&edesc->vdesc);
                         } else if (edesc->processed == edesc->pset_nr) {
                                 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+                                edesc->residue = 0;
                                 edma_stop(echan->ch_num);
                                 vchan_cookie_complete(&edesc->vdesc);
                                 edma_execute(echan);
                         } else {
                                 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+
+                                /* Update statistics for tx_status */
+                                edesc->residue -= edesc->sg_len;
+                                edesc->residue_stat = edesc->residue;
+                                edesc->processed_stat = edesc->processed;
+
                                 edma_execute(echan);
                         }
                 }
 
-                spin_unlock_irqrestore(&echan->vchan.lock, flags);
+                spin_unlock(&echan->vchan.lock);
 
                 break;
         case EDMA_DMA_CC_ERROR:
-                spin_lock_irqsave(&echan->vchan.lock, flags);
+                spin_lock(&echan->vchan.lock);
 
                 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
 
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
                         edma_trigger_channel(echan->ch_num);
                 }
 
-                spin_unlock_irqrestore(&echan->vchan.lock, flags);
+                spin_unlock(&echan->vchan.lock);
 
                 break;
         default:
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
         echan->alloced = true;
         echan->slot[0] = echan->ch_num;
 
-        dev_dbg(dev, "allocated channel for %u:%u\n",
+        dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
                 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
         return 0;
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan)
         spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
-static size_t edma_desc_size(struct edma_desc *edesc)
+static u32 edma_residue(struct edma_desc *edesc)
 {
+        bool dst = edesc->direction == DMA_DEV_TO_MEM;
+        struct edma_pset *pset = edesc->pset;
+        dma_addr_t done, pos;
         int i;
-        size_t size;
-
-        if (edesc->absync)
-                for (size = i = 0; i < edesc->pset_nr; i++)
-                        size += (edesc->pset[i].a_b_cnt & 0xffff) *
-                                (edesc->pset[i].a_b_cnt >> 16) *
-                                edesc->pset[i].ccnt;
-        else
-                size = (edesc->pset[0].a_b_cnt & 0xffff) *
-                        (edesc->pset[0].a_b_cnt >> 16) +
-                        (edesc->pset[0].a_b_cnt & 0xffff) *
-                        (SZ_64K - 1) * edesc->pset[0].ccnt;
-
-        return size;
+
+        /*
+         * We always read the dst/src position from the first RamPar
+         * pset. That's the one which is active now.
+         */
+        pos = edma_get_position(edesc->echan->slot[0], dst);
+
+        /*
+         * Cyclic is simple. Just subtract pset[0].addr from pos.
+         *
+         * We never update edesc->residue in the cyclic case, so we
+         * can tell the remaining room to the end of the circular
+         * buffer.
+         */
+        if (edesc->cyclic) {
+                done = pos - pset->addr;
+                edesc->residue_stat = edesc->residue - done;
+                return edesc->residue_stat;
+        }
+
+        /*
+         * For SG operation we catch up with the last processed
+         * status.
+         */
+        pset += edesc->processed_stat;
+
+        for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
+                /*
+                 * If we are inside this pset address range, we know
+                 * this is the active one. Get the current delta and
+                 * stop walking the psets.
+                 */
+                if (pos >= pset->addr && pos < pset->addr + pset->len)
+                        return edesc->residue_stat - (pos - pset->addr);
+
+                /* Otherwise mark it done and update residue_stat. */
+                edesc->processed_stat++;
+                edesc->residue_stat -= pset->len;
+        }
+        return edesc->residue_stat;
 }
 
 /* Check request completion status */
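On the consumer side this finer-grained accounting is visible through the standard status query; a small sketch (the wrapper function is hypothetical, dmaengine_tx_status() and struct dma_tx_state are existing API):

#include <linux/dmaengine.h>

/* Sketch: how many bytes of a submitted transfer are still outstanding. */
static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        /* Ends up in edma_tx_status(), which now calls edma_residue()
         * for the descriptor that is currently executing. */
        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_COMPLETE)
                return 0;

        return state.residue;
}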
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
                 return ret;
 
         spin_lock_irqsave(&echan->vchan.lock, flags);
-        vdesc = vchan_find_desc(&echan->vchan, cookie);
-        if (vdesc) {
-                txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
-        } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
-                struct edma_desc *edesc = echan->edesc;
-                txstate->residue = edma_desc_size(edesc);
-        }
+        if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+                txstate->residue = edma_residue(echan->edesc);
+        else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
+                txstate->residue = to_edma_desc(&vdesc->tx)->residue;
         spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
         return ret;
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc,
         }
 }
 
+#define EDMA_DMA_BUSWIDTHS        (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int edma_dma_device_slave_caps(struct dma_chan *dchan,
+                                      struct dma_slave_caps *caps)
+{
+        caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+        caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
+        caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+        caps->cmd_pause = true;
+        caps->cmd_terminate = true;
+        caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+        return 0;
+}
+
 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
                           struct device *dev)
 {
         dma->device_prep_slave_sg = edma_prep_slave_sg;
         dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+        dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
         dma->device_alloc_chan_resources = edma_alloc_chan_resources;
         dma->device_free_chan_resources = edma_free_chan_resources;
         dma->device_issue_pending = edma_issue_pending;
         dma->device_tx_status = edma_tx_status;
         dma->device_control = edma_control;
+        dma->device_slave_caps = edma_dma_device_slave_caps;
         dma->dev = dev;
 
+        /*
+         * code using dma memcpy must make sure alignment of
+         * length is at dma->copy_align boundary.
+         */
+        dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
         INIT_LIST_HEAD(&dma->channels);
 }
 
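Client drivers can discover these capabilities before configuring a channel; a brief sketch (the helper is hypothetical; dma_get_slave_caps() and the dstn_addr_widths field name match the dmaengine API of this kernel generation):

#include <linux/dmaengine.h>

/* Sketch: check pause support and 4-byte buswidth before configuring. */
static bool example_channel_can_pause(struct dma_chan *chan)
{
        struct dma_slave_caps caps;

        if (dma_get_slave_caps(chan, &caps))
                return false;

        /* For EDMA this reports the EDMA_DMA_BUSWIDTHS mask and
         * cmd_pause = true as filled in by edma_dma_device_slave_caps(). */
        return caps.cmd_pause &&
               (caps.dstn_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}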
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev)
 
         dma_cap_zero(ecc->dma_slave.cap_mask);
         dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+        dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
+        dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
 
         edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
 