path: root/drivers/dma/omap-dma.c
author		Russell King <rmk+kernel@arm.linux.org.uk>	2013-11-02 13:07:09 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-04-03 19:27:50 -0400
commit		fa3ad86ae0576b2c721800cc4d46864aa6d31ffd (patch)
tree		05b030142b8aa5213530cdfa257b748113b6e3c0 /drivers/dma/omap-dma.c
parent		913a2d0c6952283bc9323cb9152af87f792ff4c4 (diff)
dmaengine: omap-dma: control start/stop directly
Program the non-cyclic mode DMA start/stop directly, rather than via
arch/arm/plat-omap/dma.c.

Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
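For orientation (not part of the commit): the omap_dma_start()/omap_dma_stop() helpers added below sit underneath the standard dmaengine client API and are never called by client drivers directly. A minimal, hypothetical sketch of how a transfer reaches them, assuming the caller has already obtained a channel and built a scatterlist (the function name example_issue_rx is illustrative only):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical client-side sketch: queue a slave scatter-gather RX
 * transfer.  dma_async_issue_pending() eventually reaches
 * omap_dma_start_sg(), which after this patch calls omap_dma_start()
 * directly; terminating the channel ends up in omap_dma_stop().
 */
static int example_issue_rx(struct dma_chan *chan,
			    struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */

	return 0;
}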
Diffstat (limited to 'drivers/dma/omap-dma.c')
-rw-r--r--	drivers/dma/omap-dma.c	151
1 file changed, 141 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 8c5c862f01ed..7aa5ff7ab935 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -5,6 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
@@ -60,6 +61,7 @@ struct omap_desc {
 	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
 	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
 	uint8_t periph_port;	/* Peripheral port */
+	uint16_t cicr;		/* CICR value */
 
 	unsigned sglen;
 	struct omap_sg sg[0];
@@ -95,6 +97,111 @@ static void omap_dma_desc_free(struct virt_dma_desc *vd)
 	kfree(container_of(vd, struct omap_desc, vd));
 }
 
+static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+{
+	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+	uint32_t val;
+
+	if (__dma_omap15xx(od->plat->dma_attr))
+		c->plat->dma_write(0, CPC, c->dma_ch);
+	else
+		c->plat->dma_write(0, CDAC, c->dma_ch);
+
+	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
+		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);
+
+		if (dma_omap1())
+			val &= ~(1 << 14);
+
+		val |= c->dma_ch | 1 << 15;
+
+		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
+	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
+		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);
+
+	/* Clear CSR */
+	if (dma_omap1())
+		c->plat->dma_read(CSR, c->dma_ch);
+	else
+		c->plat->dma_write(~0, CSR, c->dma_ch);
+
+	/* Enable interrupts */
+	c->plat->dma_write(d->cicr, CICR, c->dma_ch);
+
+	val = c->plat->dma_read(CCR, c->dma_ch);
+	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
+		val |= OMAP_DMA_CCR_BUFFERING_DISABLE;
+	val |= OMAP_DMA_CCR_EN;
+	mb();
+	c->plat->dma_write(val, CCR, c->dma_ch);
+}
+
+static void omap_dma_stop(struct omap_chan *c)
+{
+	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+	uint32_t val;
+
+	/* disable irq */
+	c->plat->dma_write(0, CICR, c->dma_ch);
+
+	/* Clear CSR */
+	if (dma_omap1())
+		c->plat->dma_read(CSR, c->dma_ch);
+	else
+		c->plat->dma_write(~0, CSR, c->dma_ch);
+
+	val = c->plat->dma_read(CCR, c->dma_ch);
+	if (od->plat->errata & DMA_ERRATA_i541 &&
+	    val & OMAP_DMA_CCR_SEL_SRC_DST_SYNC) {
+		uint32_t sysconfig;
+		unsigned i;
+
+		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
+		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);
+
+		val = c->plat->dma_read(CCR, c->dma_ch);
+		val &= ~OMAP_DMA_CCR_EN;
+		c->plat->dma_write(val, CCR, c->dma_ch);
+
+		/* Wait for sDMA FIFO to drain */
+		for (i = 0; ; i++) {
+			val = c->plat->dma_read(CCR, c->dma_ch);
+			if (!(val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE)))
+				break;
+
+			if (i > 100)
+				break;
+
+			udelay(5);
+		}
+
+		if (val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))
+			dev_err(c->vc.chan.device->dev,
+				"DMA drain did not complete on lch %d\n",
+				c->dma_ch);
+
+		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
+	} else {
+		val &= ~OMAP_DMA_CCR_EN;
+		c->plat->dma_write(val, CCR, c->dma_ch);
+	}
+
+	mb();
+
+	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
+		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);
+
+		if (dma_omap1())
+			val |= 1 << 14; /* set the STOP_LNK bit */
+		else
+			val &= ~(1 << 15); /* Clear the ENABLE_LNK bit */
+
+		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
+	}
+}
+
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
 	unsigned idx)
 {
@@ -113,7 +220,7 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
 	c->plat->dma_write(sg->en, CEN, c->dma_ch);
 	c->plat->dma_write(sg->fn, CFN, c->dma_ch);
 
-	omap_start_dma(c->dma_ch);
+	omap_dma_start(c, d);
 }
 
 static void omap_dma_start_desc(struct omap_chan *c)
@@ -434,6 +541,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 	d->sync_mode = OMAP_DMA_SYNC_FRAME;
 	d->sync_type = sync_type;
 	d->periph_port = OMAP_DMA_PORT_TIPB;
+	d->cicr = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
+
+	if (dma_omap1())
+		d->cicr |= OMAP1_DMA_TOUT_IRQ;
+	else
+		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;
 
 	/*
 	 * Build our scatterlist entries: each contains the address,
@@ -463,6 +576,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
 	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
 	void *context)
 {
+	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
 	struct omap_chan *c = to_omap_dma_chan(chan);
 	enum dma_slave_buswidth dev_width;
 	struct omap_desc *d;
@@ -519,15 +633,25 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
 	d->sg[0].en = period_len / es_bytes[es];
 	d->sg[0].fn = buf_len / period_len;
 	d->sglen = 1;
+	d->cicr = OMAP_DMA_DROP_IRQ;
+	if (flags & DMA_PREP_INTERRUPT)
+		d->cicr |= OMAP_DMA_FRAME_IRQ;
+
+	if (dma_omap1())
+		d->cicr |= OMAP1_DMA_TOUT_IRQ;
+	else
+		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;
 
 	if (!c->cyclic) {
 		c->cyclic = true;
-		omap_dma_link_lch(c->dma_ch, c->dma_ch);
 
-		if (flags & DMA_PREP_INTERRUPT)
-			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+		if (__dma_omap15xx(od->plat->dma_attr)) {
+			uint32_t val;
 
-		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+			val = c->plat->dma_read(CCR, c->dma_ch);
+			val |= 3 << 8;
+			c->plat->dma_write(val, CCR, c->dma_ch);
+		}
 	}
 
 	if (dma_omap2plus()) {
@@ -568,20 +692,27 @@ static int omap_dma_terminate_all(struct omap_chan *c)
 
 	/*
 	 * Stop DMA activity: we assume the callback will not be called
-	 * after omap_stop_dma() returns (even if it does, it will see
+	 * after omap_dma_stop() returns (even if it does, it will see
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
 		c->desc = NULL;
 		/* Avoid stopping the dma twice */
 		if (!c->paused)
-			omap_stop_dma(c->dma_ch);
+			omap_dma_stop(c);
 	}
 
 	if (c->cyclic) {
 		c->cyclic = false;
 		c->paused = false;
-		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+
+		if (__dma_omap15xx(od->plat->dma_attr)) {
+			uint32_t val;
+
+			val = c->plat->dma_read(CCR, c->dma_ch);
+			val &= ~(3 << 8);
+			c->plat->dma_write(val, CCR, c->dma_ch);
+		}
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
@@ -598,7 +729,7 @@ static int omap_dma_pause(struct omap_chan *c)
 		return -EINVAL;
 
 	if (!c->paused) {
-		omap_stop_dma(c->dma_ch);
+		omap_dma_stop(c);
 		c->paused = true;
 	}
 
@@ -612,7 +743,7 @@ static int omap_dma_resume(struct omap_chan *c)
 		return -EINVAL;
 
 	if (c->paused) {
-		omap_start_dma(c->dma_ch);
+		omap_dma_start(c, c->desc);
 		c->paused = false;
 	}
 