about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAlexandre Bounine <alexandre.bounine@idt.com>2016-03-22 17:25:57 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-22 18:36:02 -0400
commitd2a321f37ed49de86058b5daaf50a11d3ee2d61f (patch)
treee9f9e02f5f56dc224064fc557a48a4ff0b90ddcc
parent9673b883c261b055433527e9249781b43172c103 (diff)
rapidio/tsi721_dma: fix pending transaction queue handling
Fix pending DMA request queue handling to avoid broken ordering during concurrent request submissions.

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c60
2 files changed, 32 insertions, 30 deletions
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index f81d01149b39..d675a44ffc17 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -674,7 +674,7 @@ struct tsi721_bdma_chan {
674 struct dma_chan dchan; 674 struct dma_chan dchan;
675 struct tsi721_tx_desc *tx_desc; 675 struct tsi721_tx_desc *tx_desc;
676 spinlock_t lock; 676 spinlock_t lock;
677 struct list_head active_list; 677 struct tsi721_tx_desc *active_tx;
678 struct list_head queue; 678 struct list_head queue;
679 struct list_head free_list; 679 struct list_head free_list;
680 struct tasklet_struct tasklet; 680 struct tasklet_struct tasklet;
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 47295940a868..500e1e044c36 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -63,14 +63,6 @@ struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
63 return container_of(txd, struct tsi721_tx_desc, txd); 63 return container_of(txd, struct tsi721_tx_desc, txd);
64} 64}
65 65
66static inline
67struct tsi721_tx_desc *tsi721_dma_first_active(
68 struct tsi721_bdma_chan *bdma_chan)
69{
70 return list_first_entry(&bdma_chan->active_list,
71 struct tsi721_tx_desc, desc_node);
72}
73
74static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) 66static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
75{ 67{
76 struct tsi721_dma_desc *bd_ptr; 68 struct tsi721_dma_desc *bd_ptr;
@@ -534,23 +526,30 @@ entry_done:
534 return err; 526 return err;
535} 527}
536 528
537static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) 529static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
530 struct tsi721_tx_desc *desc)
538{ 531{
539 struct tsi721_tx_desc *desc;
540 int err; 532 int err;
541 533
542 dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__); 534 dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);
543 535
536 if (!tsi721_dma_is_idle(bdma_chan))
537 return;
538
544 /* 539 /*
545 * If there are any new transactions in the queue add them 540 * If there is no data transfer in progress, fetch new descriptor from
546 * into the processing list 541 * the pending queue.
547 */ 542 */
548 if (!list_empty(&bdma_chan->queue)) 543
549 list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); 544 if (desc == NULL && bdma_chan->active_tx == NULL &&
545 !list_empty(&bdma_chan->queue)) {
546 desc = list_first_entry(&bdma_chan->queue,
547 struct tsi721_tx_desc, desc_node);
548 list_del_init((&desc->desc_node));
549 bdma_chan->active_tx = desc;
550 }
550 551
551 /* Start new transaction (if available) */ 552 if (desc) {
552 if (!list_empty(&bdma_chan->active_list)) {
553 desc = tsi721_dma_first_active(bdma_chan);
554 err = tsi721_submit_sg(desc); 553 err = tsi721_submit_sg(desc);
555 if (!err) 554 if (!err)
556 tsi721_start_dma(bdma_chan); 555 tsi721_start_dma(bdma_chan);
@@ -581,6 +580,10 @@ static void tsi721_dma_tasklet(unsigned long data)
581 dev_err(bdma_chan->dchan.device->dev, 580 dev_err(bdma_chan->dchan.device->dev,
582 "%s: DMA ERROR - DMAC%d_STS = 0x%x\n", 581 "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
583 __func__, bdma_chan->id, dmac_sts); 582 __func__, bdma_chan->id, dmac_sts);
583
584 spin_lock(&bdma_chan->lock);
585 bdma_chan->active_tx = NULL;
586 spin_unlock(&bdma_chan->lock);
584 } 587 }
585 588
586 if (dmac_int & TSI721_DMAC_INT_STFULL) { 589 if (dmac_int & TSI721_DMAC_INT_STFULL) {
@@ -594,7 +597,7 @@ static void tsi721_dma_tasklet(unsigned long data)
594 597
595 tsi721_clr_stat(bdma_chan); 598 tsi721_clr_stat(bdma_chan);
596 spin_lock(&bdma_chan->lock); 599 spin_lock(&bdma_chan->lock);
597 desc = tsi721_dma_first_active(bdma_chan); 600 desc = bdma_chan->active_tx;
598 601
599 if (desc->sg_len == 0) { 602 if (desc->sg_len == 0) {
600 dma_async_tx_callback callback = NULL; 603 dma_async_tx_callback callback = NULL;
@@ -606,14 +609,15 @@ static void tsi721_dma_tasklet(unsigned long data)
606 callback = desc->txd.callback; 609 callback = desc->txd.callback;
607 param = desc->txd.callback_param; 610 param = desc->txd.callback_param;
608 } 611 }
609 list_move(&desc->desc_node, &bdma_chan->free_list); 612 list_add(&desc->desc_node, &bdma_chan->free_list);
613 bdma_chan->active_tx = NULL;
610 spin_unlock(&bdma_chan->lock); 614 spin_unlock(&bdma_chan->lock);
611 if (callback) 615 if (callback)
612 callback(param); 616 callback(param);
613 spin_lock(&bdma_chan->lock); 617 spin_lock(&bdma_chan->lock);
614 } 618 }
615 619
616 tsi721_advance_work(bdma_chan); 620 tsi721_advance_work(bdma_chan, bdma_chan->active_tx);
617 spin_unlock(&bdma_chan->lock); 621 spin_unlock(&bdma_chan->lock);
618 } 622 }
619 623
@@ -720,9 +724,6 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
720 if (bdma_chan->bd_base == NULL) 724 if (bdma_chan->bd_base == NULL)
721 return; 725 return;
722 726
723 BUG_ON(!list_empty(&bdma_chan->active_list));
724 BUG_ON(!list_empty(&bdma_chan->queue));
725
726 tsi721_bdma_interrupt_enable(bdma_chan, 0); 727 tsi721_bdma_interrupt_enable(bdma_chan, 0);
727 bdma_chan->active = false; 728 bdma_chan->active = false;
728 tsi721_sync_dma_irq(bdma_chan); 729 tsi721_sync_dma_irq(bdma_chan);
@@ -745,11 +746,11 @@ static void tsi721_issue_pending(struct dma_chan *dchan)
745 746
746 dev_dbg(dchan->device->dev, "%s: Enter\n", __func__); 747 dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);
747 748
749 spin_lock_bh(&bdma_chan->lock);
748 if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) { 750 if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
749 spin_lock_bh(&bdma_chan->lock); 751 tsi721_advance_work(bdma_chan, NULL);
750 tsi721_advance_work(bdma_chan);
751 spin_unlock_bh(&bdma_chan->lock);
752 } 752 }
753 spin_unlock_bh(&bdma_chan->lock);
753} 754}
754 755
755static 756static
@@ -839,7 +840,8 @@ static int tsi721_terminate_all(struct dma_chan *dchan)
839 } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); 840 } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
840 } 841 }
841 842
842 list_splice_init(&bdma_chan->active_list, &list); 843 if (bdma_chan->active_tx)
844 list_add(&bdma_chan->active_tx->desc_node, &list);
843 list_splice_init(&bdma_chan->queue, &list); 845 list_splice_init(&bdma_chan->queue, &list);
844 846
845 list_for_each_entry_safe(desc, _d, &list, desc_node) 847 list_for_each_entry_safe(desc, _d, &list, desc_node)
@@ -875,7 +877,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
875 877
876 spin_lock_init(&bdma_chan->lock); 878 spin_lock_init(&bdma_chan->lock);
877 879
878 INIT_LIST_HEAD(&bdma_chan->active_list); 880 bdma_chan->active_tx = NULL;
879 INIT_LIST_HEAD(&bdma_chan->queue); 881 INIT_LIST_HEAD(&bdma_chan->queue);
880 INIT_LIST_HEAD(&bdma_chan->free_list); 882 INIT_LIST_HEAD(&bdma_chan->free_list);
881 883