 crypto/async_tx/async_tx.c |   75 ---------------------------------------
 drivers/dma/Kconfig        |    2 --
 drivers/dma/dmaengine.c    |   84 ++++++++++++++++++++++++++++++++++++++++
 drivers/dma/iop-adma.c     |    3 +--
 drivers/dma/mv_xor.c       |    3 +--
 include/linux/async_tx.h   |   15 ---------
 include/linux/dmaengine.h  |    9 +++++
 7 files changed, 95 insertions(+), 96 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index dcbf1be149f3..8cfac182165d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -72,81 +72,6 @@ void async_tx_issue_pending_all(void)
 }
 EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
 
-/* dma_wait_for_async_tx - spin wait for a transcation to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
-
-	if (!tx)
-		return DMA_SUCCESS;
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_async_tx_descriptor *dep = tx->next;
-	struct dma_async_tx_descriptor *dep_next;
-	struct dma_chan *chan;
-
-	if (!dep)
-		return;
-
-	chan = dep->chan;
-
-	/* keep submitting up until a channel switch is detected
-	 * in that case we will be called again as a result of
-	 * processing the interrupt from async_tx_channel_switch
-	 */
-	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
-		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
-		else
-			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
-
-		dep->tx_submit(dep);
-	}
-
-	chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
 static void
 free_dma_chan_ref(struct rcu_head *rcu)
 {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 904e57558bb5..e34b06420816 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,7 +33,6 @@ config INTEL_IOATDMA
 config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
-	select ASYNC_CORE
 	select DMA_ENGINE
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@ config FSL_DMA
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
-	select ASYNC_CORE
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Marvell XOR engine.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 657996517374..b9008932a8f3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -626,6 +626,90 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+/* dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * This routine assumes that tx was obtained from a call to async_memcpy,
+ * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
+ * and submitted).  Walking the parent chain is only meant to cover for DMA
+ * drivers that do not implement the DMA_INTERRUPT capability and may race with
+ * the driver's descriptor cleanup routine.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	enum dma_status status;
+	struct dma_async_tx_descriptor *iter;
+	struct dma_async_tx_descriptor *parent;
+
+	if (!tx)
+		return DMA_SUCCESS;
+
+	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
+		  " %s\n", __func__, dev_name(&tx->chan->dev));
+
+	/* poll through the dependency chain, return when tx is complete */
+	do {
+		iter = tx;
+
+		/* find the root of the unsubmitted dependency chain */
+		do {
+			parent = iter->parent;
+			if (!parent)
+				break;
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window for ->parent == NULL and
+		 * ->cookie == -EBUSY
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
+
+		status = dma_sync_wait(iter->chan, iter->cookie);
+	} while (status == DMA_IN_PROGRESS || (iter != tx));
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/* dma_run_dependencies - helper routine for dma drivers to process
+ *	(start) dependent operations on their target channel
+ * @tx: transaction with dependencies
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_async_tx_descriptor *dep = tx->next;
+	struct dma_async_tx_descriptor *dep_next;
+	struct dma_chan *chan;
+
+	if (!dep)
+		return;
+
+	chan = dep->chan;
+
+	/* keep submitting up until a channel switch is detected
+	 * in that case we will be called again as a result of
+	 * processing the interrupt from async_tx_channel_switch
+	 */
+	for (; dep; dep = dep_next) {
+		spin_lock_bh(&dep->lock);
+		dep->parent = NULL;
+		dep_next = dep->next;
+		if (dep_next && dep_next->chan == chan)
+			dep->next = NULL; /* ->next will be submitted */
+		else
+			dep_next = NULL; /* submit current dep and terminate */
+		spin_unlock_bh(&dep->lock);
+
+		dep->tx_submit(dep);
+	}
+
+	chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
+
 static int __init dma_bus_init(void)
 {
 	mutex_init(&dma_list_mutex);
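
Note: for context, a minimal sketch of how a dmaengine consumer might use the relocated helper once a descriptor has been prepped. The function name example_sync_copy and its error handling are illustrative assumptions, not part of this patch; only dma_submit_error(), device_issue_pending() and dma_wait_for_async_tx() are real dmaengine interfaces.

/* Hypothetical client-side usage (illustration only, not in this patch). */
static int example_sync_copy(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	/* submit the prepped descriptor to its channel */
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* flush the channel's pending queue */
	tx->chan->device->device_issue_pending(tx->chan);

	/* spin until the whole dependency chain has retired */
	if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
		return -EIO;

	return 0;
}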
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be317262200..be9ea9f88805 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 	}
 
 	/* run dependent operations */
-	async_tx_run_dependencies(&desc->async_tx);
+	dma_run_dependencies(&desc->async_tx);
 
 	return cookie;
 }
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda17426411..3f46df3390c7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 	}
 
 	/* run dependent operations */
-	async_tx_run_dependencies(&desc->async_tx);
+	dma_run_dependencies(&desc->async_tx);
 
 	return cookie;
 }
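
Note: both converted call sites follow the same descriptor-cleanup pattern. A condensed, hypothetical sketch of where dma_run_dependencies() sits in a driver's completion path; the function name is invented for illustration, and only the dma_async_tx_descriptor fields (cookie, callback, callback_param) are real.

/* Hypothetical driver cleanup path (illustration only, not in this patch). */
static dma_cookie_t
example_run_tx_complete_actions(struct dma_async_tx_descriptor *tx,
				dma_cookie_t cookie)
{
	if (tx->cookie > 0) {
		cookie = tx->cookie;

		/* invoke the client's completion callback, if any */
		if (tx->callback)
			tx->callback(tx->callback_param);
	}

	/* start (submit) any operations that were waiting on this one */
	dma_run_dependencies(tx);

	return cookie;
}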
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4cc4360..1c816775f135 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -60,8 +60,6 @@ enum async_tx_flags {
 
 #ifdef CONFIG_DMA_ENGINE
 void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-	struct dma_chan *host_chan)
-{
-	do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index adb0b084eb5a..e4ec7e7b8056 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -475,11 +475,20 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+#ifdef CONFIG_DMA_ENGINE
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+#else
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	return DMA_SUCCESS;
+}
+#endif
 
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 
 /* --- Helper iov-locking functions --- */
 