author | Dan Williams <dan.j.williams@intel.com> | 2009-01-05 19:14:31 -0500
committer | Dan Williams <dan.j.williams@intel.com> | 2009-01-05 20:10:19 -0500
commit | 07f2211e4fbce6990722d78c4f04225da9c0e9cf (patch)
tree | 51934e20a334e93c8c399d2e6375f264551e9bc3 /crypto/async_tx/async_tx.c
parent | 28405d8d9ce05f5bd869ef8b48da5086f9527d73 (diff)
dmaengine: remove dependency on async_tx
async_tx.ko is a consumer of dma channels. A circular dependency arises
if modules in drivers/dma rely on common code in async_tx.ko, and it
prevents either module from being unloaded.
Move dma_wait_for_async_tx and async_tx_run_dependencies to dmaengine.o,
where they should have been from the beginning.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
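
To make the dependency described above concrete, here is a minimal sketch of the kind of call site the move is aimed at: a descriptor-cleanup path inside a drivers/dma driver. This is not code from the patch; the helper's signature is taken from the hunk removed below, the driver-side name (my_adma_complete_descriptor) is made up for illustration, and it is assumed the helper keeps its name and is declared in <linux/dmaengine.h> after the move.

/* Hedged sketch only: a hypothetical drivers/dma cleanup routine showing
 * why async_tx_run_dependencies() belongs in dmaengine.o -- the driver
 * needs only this helper (and dma_wait_for_async_tx() for synchronous
 * waiters), not the rest of async_tx.ko.
 */
#include <linux/dmaengine.h>

static void my_adma_complete_descriptor(struct dma_async_tx_descriptor *tx)
{
	/* notify the client that submitted this descriptor */
	if (tx->callback)
		tx->callback(tx->callback_param);

	/* start any operations chained behind this descriptor; with this
	 * patch the symbol is provided by dmaengine.o, so the driver no
	 * longer pins async_tx.ko (and vice versa)
	 */
	async_tx_run_dependencies(tx);
}

With both helpers living in dmaengine.o, drivers/dma modules no longer import symbols from async_tx.ko, so the module reference cycle is broken and either module can be unloaded independently.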
Diffstat (limited to 'crypto/async_tx/async_tx.c')
-rw-r--r-- | crypto/async_tx/async_tx.c | 75
1 file changed, 0 insertions(+), 75 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index dcbf1be149f3..8cfac182165d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -72,81 +72,6 @@ void async_tx_issue_pending_all(void)
 }
 EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
 
-/* dma_wait_for_async_tx - spin wait for a transcation to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
-
-	if (!tx)
-		return DMA_SUCCESS;
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- * (start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_async_tx_descriptor *dep = tx->next;
-	struct dma_async_tx_descriptor *dep_next;
-	struct dma_chan *chan;
-
-	if (!dep)
-		return;
-
-	chan = dep->chan;
-
-	/* keep submitting up until a channel switch is detected
-	 * in that case we will be called again as a result of
-	 * processing the interrupt from async_tx_channel_switch
-	 */
-	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
-		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
-		else
-			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
-
-		dep->tx_submit(dep);
-	}
-
-	chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
 static void
 free_dma_chan_ref(struct rcu_head *rcu)
 {