author	Dan Williams <dan.j.williams@intel.com>	2009-01-05 19:14:31 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-05 20:10:19 -0500
commit	07f2211e4fbce6990722d78c4f04225da9c0e9cf (patch)
tree	51934e20a334e93c8c399d2e6375f264551e9bc3 /drivers/dma/dmaengine.c
parent	28405d8d9ce05f5bd869ef8b48da5086f9527d73 (diff)
dmaengine: remove dependency on async_tx
async_tx.ko is a consumer of dma channels.  A circular dependency arises
if modules in drivers/dma rely on common code in async_tx.ko.  This cycle
prevents either module from being unloaded.

Move dma_wait_for_async_tx and async_tx_run_dependencies to dmaengine.o
where they should have been from the beginning.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
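For illustration, this is the driver-side pattern the move enables: a DMA
driver can start dependent transactions from its own descriptor-cleanup path
by calling the now-core dma_run_dependencies() helper, without linking
against async_tx.ko.  A minimal sketch against the dmaengine structures of
this era; the my_adma_* names and fields are hypothetical, not part of this
commit:

#include <linux/dmaengine.h>

/* hypothetical driver descriptor wrapper -- illustrative only */
struct my_adma_desc {
	struct dma_async_tx_descriptor async_tx;
	/* ... driver-private fields (hw descriptor, list links) ... */
};

/* called from the driver's cleanup tasklet once the hardware reports
 * this descriptor complete */
static void my_adma_clean_descriptor(struct my_adma_desc *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	/* invoke the client's completion callback, if any */
	if (tx->callback)
		tx->callback(tx->callback_param);

	/* start transactions that were waiting on this one; with this
	 * patch the helper lives in the dmaengine core, so a
	 * drivers/dma module no longer needs symbols from async_tx.ko */
	dma_run_dependencies(tx);
}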
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 657996517374..b9008932a8f3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -626,6 +626,90 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+/* dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * This routine assumes that tx was obtained from a call to async_memcpy,
+ * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
+ * and submitted).  Walking the parent chain is only meant to cover for DMA
+ * drivers that do not implement the DMA_INTERRUPT capability and may race with
+ * the driver's descriptor cleanup routine.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	enum dma_status status;
+	struct dma_async_tx_descriptor *iter;
+	struct dma_async_tx_descriptor *parent;
+
+	if (!tx)
+		return DMA_SUCCESS;
+
+	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
+		  " %s\n", __func__, dev_name(&tx->chan->dev));
+
+	/* poll through the dependency chain, return when tx is complete */
+	do {
+		iter = tx;
+
+		/* find the root of the unsubmitted dependency chain */
+		do {
+			parent = iter->parent;
+			if (!parent)
+				break;
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window for ->parent == NULL and
+		 * ->cookie == -EBUSY
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
+
+		status = dma_sync_wait(iter->chan, iter->cookie);
+	} while (status == DMA_IN_PROGRESS || (iter != tx));
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/* dma_run_dependencies - helper routine for dma drivers to process
+ *	(start) dependent operations on their target channel
+ * @tx: transaction with dependencies
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_async_tx_descriptor *dep = tx->next;
+	struct dma_async_tx_descriptor *dep_next;
+	struct dma_chan *chan;
+
+	if (!dep)
+		return;
+
+	chan = dep->chan;
+
+	/* keep submitting up until a channel switch is detected
+	 * in that case we will be called again as a result of
+	 * processing the interrupt from async_tx_channel_switch
+	 */
+	for (; dep; dep = dep_next) {
+		spin_lock_bh(&dep->lock);
+		dep->parent = NULL;
+		dep_next = dep->next;
+		if (dep_next && dep_next->chan == chan)
+			dep->next = NULL; /* ->next will be submitted */
+		else
+			dep_next = NULL; /* submit current dep and terminate */
+		spin_unlock_bh(&dep->lock);
+
+		dep->tx_submit(dep);
+	}
+
+	chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
+
 static int __init dma_bus_init(void)
 {
 	mutex_init(&dma_list_mutex);
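
For reference, this is the client-side pattern dma_wait_for_async_tx() serves,
sketched against the async_memcpy() signature of this kernel era; the function
and buffer names are assumptions for illustration, not part of this commit:

#include <linux/async_tx.h>

/* illustrative only: submit an asynchronous copy and spin until it,
 * plus any unsubmitted dependency chain, completes */
static int my_copy_and_wait(struct page *dest, struct page *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* may return NULL if the copy was performed synchronously; the
	 * !tx check in dma_wait_for_async_tx() covers that case */
	tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
			  NULL, NULL, NULL);

	return dma_wait_for_async_tx(tx) == DMA_SUCCESS ? 0 : -EIO;
}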