 drivers/dma/omap-dma.c | 93 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 89 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f2e919fcea36..ae0561826137 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -33,6 +33,7 @@ struct omap_chan {
 
 	struct dma_slave_config cfg;
 	unsigned dma_sig;
+	bool cyclic;
 
 	int dma_ch;
 	struct omap_desc *desc;
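
The new per-channel flag is the whole of the cyclic state: it is set when the first cyclic descriptor is prepared, tested in the IRQ callback, and cleared again on terminate. A compact summary of that lifecycle, as wired up by the hunks below (illustrative comment, not code from the patch):

/*
 * prep_dma_cyclic():   c->cyclic = true; channel linked to itself
 * omap_dma_callback(): c->cyclic ? vchan_cyclic_callback() : sg walk
 * terminate_all():     c->cyclic = false; self-link removed
 */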
@@ -138,11 +139,15 @@ static void omap_dma_callback(int ch, u16 status, void *data)
 	spin_lock_irqsave(&c->vc.lock, flags);
 	d = c->desc;
 	if (d) {
-		if (++c->sgidx < d->sglen) {
-			omap_dma_start_sg(c, d, c->sgidx);
+		if (!c->cyclic) {
+			if (++c->sgidx < d->sglen) {
+				omap_dma_start_sg(c, d, c->sgidx);
+			} else {
+				omap_dma_start_desc(c);
+				vchan_cookie_complete(&d->vd);
+			}
 		} else {
-			omap_dma_start_desc(c);
-			vchan_cookie_complete(&d->vd);
+			vchan_cyclic_callback(&d->vd);
 		}
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
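
For a cyclic channel the frame interrupt now fires once per period, and rather than advancing the sg list and completing the cookie, the handler calls vchan_cyclic_callback(), which has the virt-dma tasklet invoke the client's callback while the hardware keeps looping. A minimal sketch of the client side of that contract, using hypothetical names (my_stream and my_period_elapsed are illustrative, not from this patch): the callback only updates bookkeeping and must not tear the transfer down.

struct my_stream {
	unsigned period_idx;	/* last period the hardware completed */
	unsigned periods;	/* number of periods in the ring buffer */
};

/* Runs from the virt-dma tasklet once per vchan_cyclic_callback(). */
static void my_period_elapsed(void *data)
{
	struct my_stream *s = data;

	s->period_idx = (s->period_idx + 1) % s->periods;
}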
@@ -358,6 +363,79 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
 }
 
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction dir, void *context)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct omap_desc *d;
+	dma_addr_t dev_addr;
+	unsigned es, sync_type;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		burst = c->cfg.src_maxburst;
+		sync_type = OMAP_DMA_SRC_SYNC;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		burst = c->cfg.dst_maxburst;
+		sync_type = OMAP_DMA_DST_SYNC;
+	} else {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = OMAP_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S32;
+		break;
+	default: /* not reached */
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dir = dir;
+	d->dev_addr = dev_addr;
+	d->fi = burst;
+	d->es = es;
+	d->sync_mode = OMAP_DMA_SYNC_PACKET;
+	d->sync_type = sync_type;
+	d->periph_port = OMAP_DMA_PORT_MPUI;
+	d->sg[0].addr = buf_addr;
+	d->sg[0].en = period_len / es_bytes[es];
+	d->sg[0].fn = buf_len / period_len;
+	d->sglen = 1;
+
+	if (!c->cyclic) {
+		c->cyclic = true;
+		omap_dma_link_lch(c->dma_ch, c->dma_ch);
+		omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+	}
+
+	if (!cpu_class_is_omap1()) {
+		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+}
+
 static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
 {
 	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
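
The whole ring is described as a single sg entry: EN is the number of elements per period and FN the period count, so with 16-bit elements (es_bytes[OMAP_DMA_DATA_TYPE_S16] is 2) a 64 KiB buffer with 8 KiB periods gives EN = 8192 / 2 = 4096 and FN = 65536 / 8192 = 8, and the omap_dma_link_lch() self-link makes the controller wrap from the last frame back to the first. Note the prep hook hard-codes DMA_CTRL_ACK | DMA_PREP_INTERRUPT, so period callbacks are always armed. A sketch of how a peripheral driver of this vintage might drive it, reusing the hypothetical my_stream above; chan, fifo_phys, buf and s are placeholders, and the dmaengine_prep_dma_cyclic() wrapper shown here later grew a flags argument:

struct dma_slave_config cfg = {
	.dst_addr	= fifo_phys,			/* device FIFO */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* -> S16 elements */
	.dst_maxburst	= 16,
};
struct dma_async_tx_descriptor *txd;

dmaengine_slave_config(chan, &cfg);

/* 64 KiB ring, one callback per 8 KiB period. */
txd = dmaengine_prep_dma_cyclic(chan, buf, 65536, 8192, DMA_MEM_TO_DEV);
if (txd) {
	txd->callback = my_period_elapsed;
	txd->callback_param = s;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
}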
@@ -392,6 +470,11 @@ static int omap_dma_terminate_all(struct omap_chan *c)
 		omap_stop_dma(c->dma_ch);
 	}
 
+	if (c->cyclic) {
+		c->cyclic = false;
+		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+	}
+
 	vchan_get_all_descriptors(&c->vc, &head);
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 	vchan_dma_desc_free_list(&c->vc, &head);
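
A cyclic descriptor never completes on its own, so terminate is the only exit: it stops the hardware, clears the flag, and removes the dma_ch-to-dma_ch self-link so the channel can be reused for ordinary sg transfers afterwards. Client-side, continuing the hypothetical example from above:

static void my_stream_stop(struct my_stream *s, struct dma_chan *chan)
{
	/* Ends up in omap_dma_terminate_all(): stops the channel,
	 * drops the self-link, and frees all queued descriptors. */
	dmaengine_terminate_all(chan);
	s->period_idx = 0;
}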
@@ -484,11 +567,13 @@ static int omap_dma_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
 	od->ddev.device_tx_status = omap_dma_tx_status;
 	od->ddev.device_issue_pending = omap_dma_issue_pending;
 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
 	od->ddev.device_control = omap_dma_control;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
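
Advertising DMA_CYCLIC in the capability mask lets clients probe for cyclic support before depending on device_prep_dma_cyclic; a one-line check along these lines (helper name is illustrative):

static bool my_chan_supports_cyclic(struct dma_chan *chan)
{
	return dma_has_cap(DMA_CYCLIC, chan->device->cap_mask);
}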