about summary refs log tree commit diff stats
path: root/crypto/async_tx/async_tx.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2009-06-03 14:43:59 -0400
committerDan Williams <dan.j.williams@intel.com>2009-06-03 17:07:35 -0400
commita08abd8ca890a377521d65d493d174bebcaf694b (patch)
tree987c149a2d7d6ab345f426ac28191627b4a02a3e /crypto/async_tx/async_tx.c
parent88ba2aa586c874681c072101287e15d40de7e6e2 (diff)
async_tx: structify submission arguments, add scribble
Prepare the api for the arrival of a new parameter, 'scribble'. This will allow callers to identify scratchpad memory for dma address or page address conversions. As this adds yet another parameter, take this opportunity to convert the common submission parameters (flags, dependency, callback, and callback argument) into an object that is passed by reference. Also, take this opportunity to fix up the kerneldoc and add notes about the relevant ASYNC_TX_* flags for each routine. [ Impact: moves api pass-by-value parameters to a pass-by-reference struct ] Signed-off-by: Andre Noll <maan@systemlinux.org> Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'crypto/async_tx/async_tx.c')
-rw-r--r--crypto/async_tx/async_tx.c51
1 files changed, 27 insertions, 24 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 3766bc3d7d89..802a5ce437d9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,13 +45,15 @@ static void __exit async_tx_exit(void)
45/** 45/**
46 * __async_tx_find_channel - find a channel to carry out the operation or let 46 * __async_tx_find_channel - find a channel to carry out the operation or let
47 * the transaction execute synchronously 47 * the transaction execute synchronously
48 * @depend_tx: transaction dependency 48 * @submit: transaction dependency and submission modifiers
49 * @tx_type: transaction type 49 * @tx_type: transaction type
50 */ 50 */
51struct dma_chan * 51struct dma_chan *
52__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, 52__async_tx_find_channel(struct async_submit_ctl *submit,
53 enum dma_transaction_type tx_type) 53 enum dma_transaction_type tx_type)
54{ 54{
55 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
56
55 /* see if we can keep the chain on one channel */ 57 /* see if we can keep the chain on one channel */
56 if (depend_tx && 58 if (depend_tx &&
57 dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) 59 dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -144,13 +146,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
144 146
145 147
146/** 148/**
147 * submit_disposition - while holding depend_tx->lock we must avoid submitting 149 * submit_disposition - flags for routing an incoming operation
148 * new operations to prevent a circular locking dependency with
149 * drivers that already hold a channel lock when calling
150 * async_tx_run_dependencies.
151 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock 150 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
152 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch 151 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
153 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly 152 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
153 *
154 * while holding depend_tx->lock we must avoid submitting new operations
155 * to prevent a circular locking dependency with drivers that already
156 * hold a channel lock when calling async_tx_run_dependencies.
154 */ 157 */
155enum submit_disposition { 158enum submit_disposition {
156 ASYNC_TX_SUBMITTED, 159 ASYNC_TX_SUBMITTED,
@@ -160,11 +163,12 @@ enum submit_disposition {
160 163
161void 164void
162async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, 165async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
163 enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, 166 struct async_submit_ctl *submit)
164 dma_async_tx_callback cb_fn, void *cb_param)
165{ 167{
166 tx->callback = cb_fn; 168 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
167 tx->callback_param = cb_param; 169
170 tx->callback = submit->cb_fn;
171 tx->callback_param = submit->cb_param;
168 172
169 if (depend_tx) { 173 if (depend_tx) {
170 enum submit_disposition s; 174 enum submit_disposition s;
@@ -220,7 +224,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
220 tx->tx_submit(tx); 224 tx->tx_submit(tx);
221 } 225 }
222 226
223 if (flags & ASYNC_TX_ACK) 227 if (submit->flags & ASYNC_TX_ACK)
224 async_tx_ack(tx); 228 async_tx_ack(tx);
225 229
226 if (depend_tx) 230 if (depend_tx)
@@ -229,21 +233,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
229EXPORT_SYMBOL_GPL(async_tx_submit); 233EXPORT_SYMBOL_GPL(async_tx_submit);
230 234
231/** 235/**
232 * async_trigger_callback - schedules the callback function to be run after 236 * async_trigger_callback - schedules the callback function to be run
233 * any dependent operations have been completed. 237 * @submit: submission and completion parameters
234 * @flags: ASYNC_TX_ACK 238 *
235 * @depend_tx: 'callback' requires the completion of this transaction 239 * honored flags: ASYNC_TX_ACK
236 * @cb_fn: function to call after depend_tx completes 240 *
237 * @cb_param: parameter to pass to the callback routine 241 * The callback is run after any dependent operations have completed.
238 */ 242 */
239struct dma_async_tx_descriptor * 243struct dma_async_tx_descriptor *
240async_trigger_callback(enum async_tx_flags flags, 244async_trigger_callback(struct async_submit_ctl *submit)
241 struct dma_async_tx_descriptor *depend_tx,
242 dma_async_tx_callback cb_fn, void *cb_param)
243{ 245{
244 struct dma_chan *chan; 246 struct dma_chan *chan;
245 struct dma_device *device; 247 struct dma_device *device;
246 struct dma_async_tx_descriptor *tx; 248 struct dma_async_tx_descriptor *tx;
249 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
247 250
248 if (depend_tx) { 251 if (depend_tx) {
249 chan = depend_tx->chan; 252 chan = depend_tx->chan;
@@ -262,14 +265,14 @@ async_trigger_callback(enum async_tx_flags flags,
262 if (tx) { 265 if (tx) {
263 pr_debug("%s: (async)\n", __func__); 266 pr_debug("%s: (async)\n", __func__);
264 267
265 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 268 async_tx_submit(chan, tx, submit);
266 } else { 269 } else {
267 pr_debug("%s: (sync)\n", __func__); 270 pr_debug("%s: (sync)\n", __func__);
268 271
269 /* wait for any prerequisite operations */ 272 /* wait for any prerequisite operations */
270 async_tx_quiesce(&depend_tx); 273 async_tx_quiesce(&submit->depend_tx);
271 274
272 async_tx_sync_epilog(cb_fn, cb_param); 275 async_tx_sync_epilog(submit);
273 } 276 }
274 277
275 return tx; 278 return tx;