Diffstat (limited to 'drivers/dma/ep93xx_dma.c'):
 drivers/dma/ep93xx_dma.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 526a7424f6a9..59e7a965772b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 static struct ep93xx_dma_desc *
 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 {
+	if (list_empty(&edmac->active))
+		return NULL;
+
 	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
 }
 
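Why the new list_empty() guard matters: list_first_entry() is just container_of() applied to head->next, so on an empty list it hands back a bogus pointer computed from the list head itself, and any dereference of that pointer corrupts memory. A minimal userspace sketch of the pattern, with illustrative names rather than the driver's types:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct desc_model {
	int cookie;
	struct list_head node;
};

/* Without the empty check, container_of() on head->next of an empty
 * list yields a pointer derived from the head itself -- the bug the
 * hunk above guards against. */
static struct desc_model *get_active_model(struct list_head *active)
{
	if (active->next == active)	/* list_empty() */
		return NULL;
	return container_of(active->next, struct desc_model, node);
}

int main(void)
{
	struct list_head active = { &active, &active };

	/* Empty list: prints a null pointer instead of a bogus descriptor. */
	printf("active desc: %p\n", (void *)get_active_model(&active));
	return 0;
}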
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
  */
 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 {
+	struct ep93xx_dma_desc *desc;
+
 	list_rotate_left(&edmac->active);
 
 	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 		return true;
 
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc)
+		return false;
+
 	/*
 	 * If txd.cookie is set it means that we are back in the first
 	 * descriptor in the chain and hence done with it.
 	 */
-	return !ep93xx_dma_get_active(edmac)->txd.cookie;
+	return !desc->txd.cookie;
 }
 
 /*
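The cookie test above encodes the chain-walk termination: only the first descriptor of a transaction carries a non-zero txd.cookie, so rotating back onto a cookie-bearing entry means every sub-descriptor has been processed. A hedged userspace model of that non-cyclic path, with an array index standing in for the rotated list head:

#include <stdbool.h>
#include <stdio.h>

struct desc_model {
	int cookie;	/* non-zero only on the chain's first descriptor */
};

/* 'front' models the rotated list head: index of the current first entry. */
static bool advance_model(const struct desc_model *chain, int len, int *front)
{
	*front = (*front + 1) % len;		/* list_rotate_left() */
	return chain[*front].cookie == 0;	/* cookie set => wrapped, done */
}

int main(void)
{
	const struct desc_model chain[3] = { { .cookie = 42 }, { 0 }, { 0 } };
	int front = 0;

	while (advance_model(chain, 3, &front))
		printf("sub-descriptor %d still pending\n", front);
	printf("wrapped to first descriptor, chain complete\n");
	return 0;
}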
@@ -327,9 +336,15 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+	struct ep93xx_dma_desc *desc;
 	u32 bus_addr;
 
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+		return;
+	}
+
 	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
 		bus_addr = desc->src_addr;
 	else
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+	struct ep93xx_dma_desc *desc;
+
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+		return;
+	}
 
 	if (edmac->buffer == 0) {
 		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
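Both fill_desc paths (M2P above and M2M here) now fail soft on an empty active list instead of dereferencing a bad descriptor: fetch, validate, warn, and bail before touching any hardware register. A compact userspace sketch of that guard-clause shape; the warning and the register write are stand-ins, not the driver's functions:

#include <stdio.h>

struct desc_model {
	unsigned int src_addr;
};

static struct desc_model *next_desc;	/* NULL models an empty list */
static unsigned int fake_sar_reg;	/* stands in for a DMA base register */

static void fill_desc_model(void)
{
	struct desc_model *desc = next_desc;

	if (!desc) {
		fprintf(stderr, "fill_desc: empty descriptor list\n");
		return;		/* bail before touching "hardware" */
	}
	fake_sar_reg = desc->src_addr;
}

int main(void)
{
	struct desc_model d = { 0x80001000 };

	fill_desc_model();	/* empty: warns, register untouched */
	next_desc = &d;
	fill_desc_model();	/* programs the fake register */
	printf("SAR = 0x%x\n", fake_sar_reg);
	return 0;
}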
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
 {
 	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
 	struct ep93xx_dma_desc *desc, *d;
-	dma_async_tx_callback callback;
-	void *callback_param;
+	dma_async_tx_callback callback = NULL;
+	void *callback_param = NULL;
 	LIST_HEAD(list);
 
 	spin_lock_irq(&edmac->lock);
+	/*
+	 * If dma_terminate_all() was called before we get to run, the active
+	 * list has become empty. If that happens we aren't supposed to do
+	 * anything more than call ep93xx_dma_advance_work().
+	 */
 	desc = ep93xx_dma_get_active(edmac);
-	if (desc->complete) {
-		edmac->last_completed = desc->txd.cookie;
-		list_splice_init(&edmac->active, &list);
+	if (desc) {
+		if (desc->complete) {
+			edmac->last_completed = desc->txd.cookie;
+			list_splice_init(&edmac->active, &list);
+		}
+		callback = desc->txd.callback;
+		callback_param = desc->txd.callback_param;
 	}
 	spin_unlock_irq(&edmac->lock);
 
 	/* Pick up the next descriptor from the queue */
 	ep93xx_dma_advance_work(edmac);
 
-	callback = desc->txd.callback;
-	callback_param = desc->txd.callback_param;
-
 	/* Now we can release all the chained descriptors */
 	list_for_each_entry_safe(desc, d, &list, node) {
 		/*
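The key move in this hunk is snapshotting desc->txd.callback and callback_param while the spinlock is held: once the lock drops, a concurrent terminate can empty the active list, so the tasklet must not reach back through desc afterwards. A hedged userspace model of that snapshot-under-lock pattern, using a pthread mutex in place of the channel spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

typedef void (*tx_callback)(void *param);

struct desc_model {
	tx_callback callback;
	void *callback_param;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc_model *active;	/* NULL once "terminated" */

static void tasklet_model(void)
{
	tx_callback callback = NULL;	/* initialised like the patch does */
	void *callback_param = NULL;

	pthread_mutex_lock(&lock);
	if (active) {			/* list may be empty after terminate */
		callback = active->callback;
		callback_param = active->callback_param;
	}
	pthread_mutex_unlock(&lock);

	if (callback)			/* safe: snapshot survives the unlock */
		callback(callback_param);
}

static void done(void *param)
{
	printf("transfer done: %s\n", (const char *)param);
}

int main(void)
{
	struct desc_model d = { done, "desc0" };

	active = &d;
	tasklet_model();	/* callback fires with the snapshot */
	active = NULL;		/* models dma_terminate_all() */
	tasklet_model();	/* no callback, no dereference, no crash */
	return 0;
}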
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 {
 	struct ep93xx_dma_chan *edmac = dev_id;
+	struct ep93xx_dma_desc *desc;
 	irqreturn_t ret = IRQ_HANDLED;
 
 	spin_lock(&edmac->lock);
 
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac),
+			 "got interrupt while active list is empty\n");
+		spin_unlock(&edmac->lock);
+		return IRQ_NONE;
+	}
+
 	switch (edmac->edma->hw_interrupt(edmac)) {
 	case INTERRUPT_DONE:
-		ep93xx_dma_get_active(edmac)->complete = true;
+		desc->complete = true;
 		tasklet_schedule(&edmac->tasklet);
 		break;
 
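Returning IRQ_NONE here is not just cleanup: it tells the IRQ core the interrupt was not serviced, which feeds the kernel's spurious-interrupt detection instead of silently swallowing events on an idle channel. A small userspace model of that return-value contract; the enum values mirror <linux/irqreturn.h>, everything else is illustrative:

#include <stdio.h>

typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;

struct chan_model {
	int has_active_desc;
};

static irqreturn_t interrupt_model(struct chan_model *chan)
{
	if (!chan->has_active_desc)
		return IRQ_NONE;	/* nothing pending: not our interrupt */
	/* ... acknowledge hardware, schedule the tasklet ... */
	return IRQ_HANDLED;
}

int main(void)
{
	struct chan_model idle = { 0 }, busy = { 1 };

	printf("idle channel: %d, busy channel: %d\n",
	       interrupt_model(&idle), interrupt_model(&busy));
	return 0;
}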