 crypto/async_tx/async_tx.c | 197
 drivers/dma/dmaengine.c    |   2
 drivers/dma/iop-adma.c     |   9
 include/linux/dmaengine.h  |   9
 4 files changed, 170 insertions(+), 47 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2be3bae89930..69756164b61d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -89,13 +89,19 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 		iter = tx;
 
 		/* find the root of the unsubmitted dependency chain */
-		while (iter->cookie == -EBUSY) {
+		do {
 			parent = iter->parent;
-			if (parent && parent->cookie == -EBUSY)
-				iter = iter->parent;
-			else
+			if (!parent)
 				break;
-		}
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window for ->parent == NULL and
+		 * ->cookie == -EBUSY
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));
@@ -111,24 +117,33 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 void
 async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *dep_tx, *_dep_tx;
-	struct dma_device *dev;
+	struct dma_async_tx_descriptor *next = tx->next;
 	struct dma_chan *chan;
 
-	list_for_each_entry_safe(dep_tx, _dep_tx, &tx->depend_list,
-		depend_node) {
-		chan = dep_tx->chan;
-		dev = chan->device;
-		/* we can't depend on ourselves */
-		BUG_ON(chan == tx->chan);
-		list_del(&dep_tx->depend_node);
-		tx->tx_submit(dep_tx);
-
-		/* we need to poke the engine as client code does not
-		 * know about dependency submission events
-		 */
-		dev->device_issue_pending(chan);
+	if (!next)
+		return;
+
+	tx->next = NULL;
+	chan = next->chan;
+
+	/* keep submitting up until a channel switch is detected
+	 * in that case we will be called again as a result of
+	 * processing the interrupt from async_tx_channel_switch
+	 */
+	while (next && next->chan == chan) {
+		struct dma_async_tx_descriptor *_next;
+
+		spin_lock_bh(&next->lock);
+		next->parent = NULL;
+		_next = next->next;
+		next->next = NULL;
+		spin_unlock_bh(&next->lock);
+
+		next->tx_submit(next);
+		next = _next;
 	}
+
+	chan->device->device_issue_pending(chan);
 }
 EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
 
@@ -397,6 +412,92 @@ static void __exit async_tx_exit(void)
 }
 #endif
 
+
+/**
+ * async_tx_channel_switch - queue an interrupt descriptor with a dependency
+ *	pre-attached.
+ * @depend_tx: the operation that must finish before the new operation runs
+ * @tx: the new operation
+ */
+static void
+async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
+			struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
+
+	/* first check to see if we can still append to depend_tx */
+	spin_lock_bh(&depend_tx->lock);
+	if (depend_tx->parent && depend_tx->chan == tx->chan) {
+		tx->parent = depend_tx;
+		depend_tx->next = tx;
+		intr_tx = NULL;
+	}
+	spin_unlock_bh(&depend_tx->lock);
+
+	if (!intr_tx)
+		return;
+
+	chan = depend_tx->chan;
+	device = chan->device;
+
+	/* see if we can schedule an interrupt
+	 * otherwise poll for completion
+	 */
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		intr_tx = device->device_prep_dma_interrupt(chan);
+	else
+		intr_tx = NULL;
+
+	if (intr_tx) {
+		intr_tx->callback = NULL;
+		intr_tx->callback_param = NULL;
+		tx->parent = intr_tx;
+		/* safe to set ->next outside the lock since we know we are
+		 * not submitted yet
+		 */
+		intr_tx->next = tx;
+
+		/* check if we need to append */
+		spin_lock_bh(&depend_tx->lock);
+		if (depend_tx->parent) {
+			intr_tx->parent = depend_tx;
+			depend_tx->next = intr_tx;
+			async_tx_ack(intr_tx);
+			intr_tx = NULL;
+		}
+		spin_unlock_bh(&depend_tx->lock);
+
+		if (intr_tx) {
+			intr_tx->parent = NULL;
+			intr_tx->tx_submit(intr_tx);
+			async_tx_ack(intr_tx);
+		}
+	} else {
+		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
+			panic("%s: DMA_ERROR waiting for depend_tx\n",
+			      __func__);
+		tx->tx_submit(tx);
+	}
+}
+
+
+/**
+ * submit_disposition - while holding depend_tx->lock we must avoid submitting
+ *	new operations to prevent a circular locking dependency with
+ *	drivers that already hold a channel lock when calling
+ *	async_tx_run_dependencies.
+ * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
+ * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
+ * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ */
+enum submit_disposition {
+	ASYNC_TX_SUBMITTED,
+	ASYNC_TX_CHANNEL_SWITCH,
+	ASYNC_TX_DIRECT_SUBMIT,
+};
+
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
@@ -405,28 +506,54 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	tx->callback = cb_fn;
 	tx->callback_param = cb_param;
 
-	/* set this new tx to run after depend_tx if:
-	 * 1/ a dependency exists (depend_tx is !NULL)
-	 * 2/ the tx can not be submitted to the current channel
-	 */
-	if (depend_tx && depend_tx->chan != chan) {
-		/* if ack is already set then we cannot be sure
+	if (depend_tx) {
+		enum submit_disposition s;
+
+		/* sanity check the dependency chain:
+		 * 1/ if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
+		 * 2/ dependencies are 1:1 i.e. two transactions can
+		 * not depend on the same parent
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(depend_tx->ack || depend_tx->next || tx->parent);
 
-		tx->parent = depend_tx;
+		/* the lock prevents async_tx_run_dependencies from missing
+		 * the setting of ->next when ->parent != NULL
+		 */
 		spin_lock_bh(&depend_tx->lock);
-		list_add_tail(&tx->depend_node, &depend_tx->depend_list);
-		if (depend_tx->cookie == 0) {
-			struct dma_chan *dep_chan = depend_tx->chan;
-			struct dma_device *dep_dev = dep_chan->device;
-			dep_dev->device_dependency_added(dep_chan);
+		if (depend_tx->parent) {
+			/* we have a parent so we can not submit directly
+			 * if we are staying on the same channel: append
+			 * else: channel switch
+			 */
+			if (depend_tx->chan == chan) {
+				tx->parent = depend_tx;
+				depend_tx->next = tx;
+				s = ASYNC_TX_SUBMITTED;
+			} else
+				s = ASYNC_TX_CHANNEL_SWITCH;
+		} else {
+			/* we do not have a parent so we may be able to submit
+			 * directly if we are staying on the same channel
+			 */
+			if (depend_tx->chan == chan)
+				s = ASYNC_TX_DIRECT_SUBMIT;
+			else
+				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
 		spin_unlock_bh(&depend_tx->lock);
 
-		/* schedule an interrupt to trigger the channel switch */
-		async_trigger_callback(ASYNC_TX_ACK, depend_tx, NULL, NULL);
+		switch (s) {
+		case ASYNC_TX_SUBMITTED:
+			break;
+		case ASYNC_TX_CHANNEL_SWITCH:
+			async_tx_channel_switch(depend_tx, tx);
+			break;
+		case ASYNC_TX_DIRECT_SUBMIT:
+			tx->parent = NULL;
+			tx->tx_submit(tx);
+			break;
+		}
 	} else {
 		tx->parent = NULL;
 		tx->tx_submit(tx);
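
Note: the async_tx.c hunks above replace the per-descriptor depend_list with a single ->parent/->next pair, so a dependency chain is now a plain linked list that async_tx_run_dependencies() walks in order and async_tx_channel_switch() stitches across channel boundaries with an interrupt descriptor. A minimal, hypothetical client sketch of how such a chain is built (foo_copy_then_xor and its buffers are illustrative, not part of the patch; the async_memcpy()/async_xor() call shapes follow this kernel era's async_tx API and should be treated as assumptions):

#include <linux/async_tx.h>

/* Hypothetical helper: copy one page into dest, then xor src_cnt pages
 * into it.  The second call passes the first transaction as depend_tx,
 * which async_tx_submit() above resolves as either a direct submit, an
 * in-channel append, or a channel switch.
 */
static struct dma_async_tx_descriptor *
foo_copy_then_xor(struct page *dest, struct page *copy_src,
		  struct page **xor_srcs, int src_cnt, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = async_memcpy(dest, copy_src, 0, 0, len, 0, NULL, NULL, NULL);
	tx = async_xor(dest, xor_srcs, 0, src_cnt, len,
		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK,
		       tx, NULL, NULL);
	return tx;
}

If the two operations land on different channels, async_tx_submit() selects ASYNC_TX_CHANNEL_SWITCH; if they share a channel and the first is still pending, the second is simply appended via ->next.
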
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8db0e7f9d3f4..9cb898a76bb3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -600,8 +600,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f82b0906d466..21854cd7190f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -63,7 +63,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
 {
 	BUG_ON(desc->async_tx.cookie < 0);
-	spin_lock_bh(&desc->async_tx.lock);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
 		desc->async_tx.cookie = 0;
@@ -101,7 +100,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 
 	/* run dependent operations */
 	async_tx_run_dependencies(&desc->async_tx);
-	spin_unlock_bh(&desc->async_tx.lock);
 
 	return cookie;
 }
@@ -275,8 +273,11 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 
 static void iop_adma_tasklet(unsigned long data)
 {
-	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
-	__iop_adma_slot_cleanup(chan);
+	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+
+	spin_lock(&iop_chan->lock);
+	__iop_adma_slot_cleanup(iop_chan);
+	spin_unlock(&iop_chan->lock);
 }
 
 static struct iop_adma_desc_slot *
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 34d440698293..91252a7e4d03 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -221,11 +221,9 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
- * @depend_list: at completion this list of transactions are submitted
- * @depend_node: allow this transaction to be executed after another
- *	transaction has completed, possibly on another channel
+ * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
- * @lock: protect the dependency list
+ * @lock: protect the parent and next pointers
  */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
@@ -236,8 +234,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-	struct list_head depend_list;
-	struct list_head depend_node;
+	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 };
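
Note: with depend_list/depend_node gone, dma_async_tx_descriptor_init() (see the dmaengine.c hunk) only sets ->chan and initialises ->lock, and the kernel-doc above now documents ->lock as protecting ->parent and ->next, which the async_tx core manages. A short sketch of what a driver-private descriptor looks like after this change (foo_adma_desc, foo_adma_desc_init and foo_adma_tx_submit are hypothetical names, not part of the patch):

#include <linux/dmaengine.h>

/* Hypothetical driver-private descriptor embedding the generic one. */
struct foo_adma_desc {
	struct dma_async_tx_descriptor async_tx;
	/* hardware descriptor, slot bookkeeping, ... */
};

static dma_cookie_t foo_adma_tx_submit(struct dma_async_tx_descriptor *tx);

static void foo_adma_desc_init(struct foo_adma_desc *desc, struct dma_chan *chan)
{
	/* sets async_tx.chan and initialises async_tx.lock; there is no
	 * longer a depend_list/depend_node to set up -- ->next and ->parent
	 * should start out NULL (e.g. from a zeroed allocation) and are
	 * then managed by the async_tx core
	 */
	dma_async_tx_descriptor_init(&desc->async_tx, chan);
	desc->async_tx.tx_submit = foo_adma_tx_submit;
}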