path: root/drivers/dma
author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-05-15 13:34:33 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-01 09:16:26 -0400
commit		d94443256f8f19732f0b95f65ec344c17751aa15 (patch)
tree		3240befb10ed346615edc1477f1a41db4511a6a6 /drivers/dma
parent		63fe23c34e1c18725bd1459897bf3a46335d88b7 (diff)
dmaengine: sa11x0-dma: add cyclic DMA support
Add support for cyclic DMA on sa11x0 platforms. This follows the discussed behaviour that the callback will be called at some point after a period expires, and may coalesce multiple period expiries into one callback (due to the tasklet behaviour).

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
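For context, a slave client drives this feature through the generic dmaengine API rather than calling the driver directly. The sketch below is illustrative only and not part of this patch: the example_* names, the DMA_DEV_TO_MEM direction and the error codes are assumptions, and the dmaengine_prep_dma_cyclic() wrapper signature has changed across kernel versions (later kernels add a flags argument).

/*
 * Illustrative client sketch only -- not part of this patch.  The
 * example_* names are made up, and DMA_DEV_TO_MEM is just one possible
 * direction; a real client would match the channel's configuration.
 */
#include <linux/atomic.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static void example_period_cb(void *data)
{
	atomic_t *periods = data;

	/*
	 * Per the commit message, several period expiries may be coalesced
	 * into one callback; a client needing the exact buffer position
	 * should query dmaengine_tx_status() rather than count invocations.
	 */
	atomic_inc(periods);
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				atomic_t *periods)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Kernels of this vintage take no flags argument here. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_period_cb;
	desc->callback_param = periods;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

Because the tasklet may fold several period expiries into one callback, the callback above only counts periods; anything needing the precise position must ask the hardware through the status interface.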
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/sa11x0-dma.c	108
1 file changed, 92 insertions, 16 deletions
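Before the diff itself, a note on the splitting arithmetic: the new sa11x0_dma_prep_dma_cyclic() callback breaks each period into sgperiod scatter entries, each at most DMA_MAX_SIZE bytes and trimmed to the DMA_ALIGN mask, with the remainder spread evenly across the entries. The standalone sketch below only mirrors those formulas; the constant values are placeholders, since the real DMA_MAX_SIZE and DMA_ALIGN definitions live in drivers/dma/sa11x0-dma.c and are not part of this excerpt.

/*
 * Standalone sketch of the period-splitting arithmetic used by the new
 * prep_dma_cyclic() callback.  The constant values are placeholders;
 * only the formulas mirror the patch.
 */
#include <stddef.h>
#include <stdio.h>

#define DMA_MAX_SIZE	0x1fff	/* placeholder per-buffer limit */
#define DMA_ALIGN	0x3	/* placeholder length-alignment mask */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void split_period(size_t period)
{
	/* Number of scatter entries needed to cover one period. */
	unsigned sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	size_t len = period;
	unsigned j;

	for (j = 0; j < sgperiod; j++) {
		size_t tlen = len;

		if (tlen > DMA_MAX_SIZE) {
			/* Spread the remainder over aligned, equal-sized chunks. */
			unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);

			tlen = (tlen / mult) & ~DMA_ALIGN;
		}

		printf("entry %u: %zu bytes\n", j, tlen);
		len -= tlen;
	}

	/* Mirrors the WARN_ON(len != 0) in the patch. */
	printf("left over: %zu bytes\n", len);
}

int main(void)
{
	split_period(20000);	/* -> 3 entries: 6664, 6668 and 6668 bytes */
	return 0;
}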
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index db4fcbd80c7d..f5a73606217e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -78,6 +78,8 @@ struct sa11x0_dma_desc {
 
 	u32			ddar;
 	size_t			size;
+	unsigned		period;
+	bool			cyclic;
 
 	unsigned		sglen;
 	struct sa11x0_dma_sg	sg[0];
@@ -178,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
 		return;
 
 	if (p->sg_load == txd->sglen) {
-		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+		if (!txd->cyclic) {
+			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
 
-		/*
-		 * We have reached the end of the current descriptor.
-		 * Peek at the next descriptor, and if compatible with
-		 * the current, start processing it.
-		 */
-		if (txn && txn->ddar == txd->ddar) {
-			txd = txn;
-			sa11x0_dma_start_desc(p, txn);
+			/*
+			 * We have reached the end of the current descriptor.
+			 * Peek at the next descriptor, and if compatible with
+			 * the current, start processing it.
+			 */
+			if (txn && txn->ddar == txd->ddar) {
+				txd = txn;
+				sa11x0_dma_start_desc(p, txn);
+			} else {
+				p->txd_load = NULL;
+				return;
+			}
 		} else {
-			p->txd_load = NULL;
-			return;
+			/* Cyclic: reset back to beginning */
+			p->sg_load = 0;
 		}
 	}
 
@@ -224,13 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 	struct sa11x0_dma_desc *txd = p->txd_done;
 
 	if (++p->sg_done == txd->sglen) {
-		vchan_cookie_complete(&txd->vd);
+		if (!txd->cyclic) {
+			vchan_cookie_complete(&txd->vd);
 
-		p->sg_done = 0;
-		p->txd_done = p->txd_load;
+			p->sg_done = 0;
+			p->txd_done = p->txd_load;
+
+			if (!p->txd_done)
+				tasklet_schedule(&p->dev->task);
+		} else {
+			if ((p->sg_done % txd->period) == 0)
+				vchan_cyclic_callback(&txd->vd);
 
-		if (!p->txd_done)
-			tasklet_schedule(&p->dev->task);
+			/* Cyclic: reset back to beginning */
+			p->sg_done = 0;
+		}
 	}
 
 	sa11x0_dma_start_sg(p, c);
@@ -597,6 +612,65 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &txd->vd, flags);
 }
 
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+	enum dma_transfer_direction dir, void *context)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_desc *txd;
+	unsigned i, j, k, sglen, sgperiod;
+
+	/* SA11x0 channels can only operate in their native direction */
+	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+			&c->vc, c->ddar, dir);
+		return NULL;
+	}
+
+	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
+	sglen = size * sgperiod / period;
+
+	/* Do not allow zero-sized txds */
+	if (sglen == 0)
+		return NULL;
+
+	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+	if (!txd) {
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
+		return NULL;
+	}
+
+	for (i = k = 0; i < size / period; i++) {
+		size_t tlen, len = period;
+
+		for (j = 0; j < sgperiod; j++, k++) {
+			tlen = len;
+
+			if (tlen > DMA_MAX_SIZE) {
+				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
+				tlen = (tlen / mult) & ~DMA_ALIGN;
+			}
+
+			txd->sg[k].addr = addr;
+			txd->sg[k].len = tlen;
+			addr += tlen;
+			len -= tlen;
+		}
+
+		WARN_ON(len != 0);
+	}
+
+	WARN_ON(k != sglen);
+
+	txd->ddar = c->ddar;
+	txd->size = size;
+	txd->sglen = sglen;
+	txd->cyclic = 1;
+	txd->period = sgperiod;
+
+	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
 {
 	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
@@ -867,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
 	}
 
 	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
 	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
 	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
 	if (ret) {
 		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",