Diffstat (limited to 'crypto/async_tx/async_tx.c')
-rw-r--r--  crypto/async_tx/async_tx.c  202
1 files changed, 165 insertions, 37 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2be3bae89930..c6e772fc5ccd 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
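The thread running through every hunk below: the old many-to-one depend_list bookkeeping is replaced by a singly linked dependency chain threaded through the descriptors themselves, via the ->parent and ->next fields. A minimal user-space sketch of the fields the hunks manipulate; struct tx_desc is an illustrative stand-in for the kernel's struct dma_async_tx_descriptor, not the real definition, with the spinlock, callback, and cookie machinery reduced to plain scalars and pointers:

#include <errno.h>      /* EBUSY */

/* simplified stand-in for struct dma_async_tx_descriptor; only the
 * fields this patch touches are modeled
 */
struct tx_desc {
        int cookie;             /* -EBUSY until assigned at submit time */
        struct tx_desc *parent; /* the one operation we depend on, or NULL */
        struct tx_desc *next;   /* the one operation depending on us, or NULL */
        void *chan;             /* channel this descriptor was allocated from */
};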
@@ -89,13 +89,19 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
                 iter = tx;
 
                 /* find the root of the unsubmitted dependency chain */
-                while (iter->cookie == -EBUSY) {
+                do {
                         parent = iter->parent;
-                        if (parent && parent->cookie == -EBUSY)
-                                iter = iter->parent;
-                        else
+                        if (!parent)
                                 break;
-                }
+                        else
+                                iter = parent;
+                } while (parent);
+
+                /* there is a small window for ->parent == NULL and
+                 * ->cookie == -EBUSY
+                 */
+                while (iter->cookie == -EBUSY)
+                        cpu_relax();
 
                 status = dma_sync_wait(iter->chan, iter->cookie);
         } while (status == DMA_IN_PROGRESS || (iter != tx));
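The new wait loop first walks ->parent links all the way to the root of the chain, and only then spins until that root has been assigned a real cookie. This closes the window in which a dependent descriptor has already been unlinked from its parent (->parent == NULL) but tx_submit has not yet replaced the -EBUSY placeholder cookie. A compilable sketch of just that walk, against the simplified struct above; wait_for_root is a made-up name, and cpu_relax() is reduced to an empty loop body:

/* walk to the root of an unsubmitted dependency chain, then wait for it
 * to be submitted; mirrors the new logic in dma_wait_for_async_tx()
 */
static struct tx_desc *wait_for_root(struct tx_desc *tx)
{
        struct tx_desc *iter = tx;
        struct tx_desc *parent;

        do {
                parent = iter->parent;
                if (!parent)
                        break;
                else
                        iter = parent;
        } while (parent);

        /* small window: ->parent == NULL but ->cookie still == -EBUSY */
        while (iter->cookie == -EBUSY)
                ;       /* the kernel calls cpu_relax() here */

        return iter;
}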
@@ -111,24 +117,33 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 void
 async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-        struct dma_async_tx_descriptor *dep_tx, *_dep_tx;
-        struct dma_device *dev;
+        struct dma_async_tx_descriptor *next = tx->next;
         struct dma_chan *chan;
 
-        list_for_each_entry_safe(dep_tx, _dep_tx, &tx->depend_list,
-                depend_node) {
-                chan = dep_tx->chan;
-                dev = chan->device;
-                /* we can't depend on ourselves */
-                BUG_ON(chan == tx->chan);
-                list_del(&dep_tx->depend_node);
-                tx->tx_submit(dep_tx);
-
-                /* we need to poke the engine as client code does not
-                 * know about dependency submission events
-                 */
-                dev->device_issue_pending(chan);
+        if (!next)
+                return;
+
+        tx->next = NULL;
+        chan = next->chan;
+
+        /* keep submitting up until a channel switch is detected
+         * in that case we will be called again as a result of
+         * processing the interrupt from async_tx_channel_switch
+         */
+        while (next && next->chan == chan) {
+                struct dma_async_tx_descriptor *_next;
+
+                spin_lock_bh(&next->lock);
+                next->parent = NULL;
+                _next = next->next;
+                next->next = NULL;
+                spin_unlock_bh(&next->lock);
+
+                next->tx_submit(next);
+                next = _next;
         }
+
+        chan->device->device_issue_pending(chan);
 }
 EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
 
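The rewritten async_tx_run_dependencies() walks the ->next chain instead of a list, submitting dependents until it reaches one that lives on a different channel; per the comment in the hunk, that remainder is picked up when the interrupt queued by async_tx_channel_switch fires and this function runs again. A locking-free sketch of the traversal order, with tx_submit and device_issue_pending replaced by hypothetical stubs:

static void stub_submit(struct tx_desc *tx) { (void)tx; }  /* tx->tx_submit */
static void stub_issue_pending(void *chan) { (void)chan; } /* device_issue_pending */

/* mirrors the new async_tx_run_dependencies() minus the per-descriptor lock */
static void run_dependencies(struct tx_desc *tx)
{
        struct tx_desc *next = tx->next;
        void *chan;

        if (!next)
                return;

        tx->next = NULL;
        chan = next->chan;

        /* keep submitting until a channel switch is detected */
        while (next && next->chan == chan) {
                struct tx_desc *_next = next->next;

                next->parent = NULL;    /* unlink before submit */
                next->next = NULL;
                stub_submit(next);
                next = _next;
        }

        stub_issue_pending(chan);       /* poke the engine once, at the end */
}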
@@ -397,6 +412,92 @@ static void __exit async_tx_exit(void)
 }
 #endif
 
+
+/**
+ * async_tx_channel_switch - queue an interrupt descriptor with a dependency
+ *      pre-attached.
+ * @depend_tx: the operation that must finish before the new operation runs
+ * @tx: the new operation
+ */
+static void
+async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
+                        struct dma_async_tx_descriptor *tx)
+{
+        struct dma_chan *chan;
+        struct dma_device *device;
+        struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
+
+        /* first check to see if we can still append to depend_tx */
+        spin_lock_bh(&depend_tx->lock);
+        if (depend_tx->parent && depend_tx->chan == tx->chan) {
+                tx->parent = depend_tx;
+                depend_tx->next = tx;
+                intr_tx = NULL;
+        }
+        spin_unlock_bh(&depend_tx->lock);
+
+        if (!intr_tx)
+                return;
+
+        chan = depend_tx->chan;
+        device = chan->device;
+
+        /* see if we can schedule an interrupt
+         * otherwise poll for completion
+         */
+        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+                intr_tx = device->device_prep_dma_interrupt(chan, 0);
+        else
+                intr_tx = NULL;
+
+        if (intr_tx) {
+                intr_tx->callback = NULL;
+                intr_tx->callback_param = NULL;
+                tx->parent = intr_tx;
+                /* safe to set ->next outside the lock since we know we are
+                 * not submitted yet
+                 */
+                intr_tx->next = tx;
+
+                /* check if we need to append */
+                spin_lock_bh(&depend_tx->lock);
+                if (depend_tx->parent) {
+                        intr_tx->parent = depend_tx;
+                        depend_tx->next = intr_tx;
+                        async_tx_ack(intr_tx);
+                        intr_tx = NULL;
+                }
+                spin_unlock_bh(&depend_tx->lock);
+
+                if (intr_tx) {
+                        intr_tx->parent = NULL;
+                        intr_tx->tx_submit(intr_tx);
+                        async_tx_ack(intr_tx);
+                }
+        } else {
+                if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
+                        panic("%s: DMA_ERROR waiting for depend_tx\n",
+                              __func__);
+                tx->tx_submit(tx);
+        }
+}
+
+
+/**
+ * submit_disposition - while holding depend_tx->lock we must avoid submitting
+ *      new operations to prevent a circular locking dependency with
+ *      drivers that already hold a channel lock when calling
+ *      async_tx_run_dependencies.
+ * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
+ * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
+ * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ */
+enum submit_disposition {
+        ASYNC_TX_SUBMITTED,
+        ASYNC_TX_CHANNEL_SWITCH,
+        ASYNC_TX_DIRECT_SUBMIT,
+};
+
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
         enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
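async_tx_channel_switch() bridges two channels by splicing a DMA_INTERRUPT descriptor between depend_tx and tx: the interrupt completes on depend_tx's channel, and its ->next hand-off restarts dependency processing on tx's channel. The (void *) ~0 initializer is a sentinel meaning "no decision made yet", distinct from NULL, which means "tx was appended under the lock, nothing left to do". A sketch of the three possible outcomes; the enum, function name, and the chan_has_interrupt_cap flag are illustrative, with descriptor allocation, locking, and the second racy re-check elided:

enum switch_outcome {
        APPENDED,          /* depend_tx still unsubmitted, same channel */
        INTERRUPT_BRIDGE,  /* interrupt descriptor spliced between the two */
        POLLED_FALLBACK,   /* no DMA_INTERRUPT capability: wait, then submit */
};

/* decision structure of async_tx_channel_switch(), greatly simplified */
static enum switch_outcome channel_switch(struct tx_desc *depend_tx,
                                          struct tx_desc *tx,
                                          int chan_has_interrupt_cap)
{
        /* can we still append directly to depend_tx? */
        if (depend_tx->parent && depend_tx->chan == tx->chan) {
                tx->parent = depend_tx;
                depend_tx->next = tx;
                return APPENDED;
        }

        if (chan_has_interrupt_cap)
                return INTERRUPT_BRIDGE;

        /* kernel: dma_wait_for_async_tx(depend_tx), then tx->tx_submit(tx) */
        return POLLED_FALLBACK;
}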
@@ -405,28 +506,55 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
         tx->callback = cb_fn;
         tx->callback_param = cb_param;
 
-        /* set this new tx to run after depend_tx if:
-         * 1/ a dependency exists (depend_tx is !NULL)
-         * 2/ the tx can not be submitted to the current channel
-         */
-        if (depend_tx && depend_tx->chan != chan) {
-                /* if ack is already set then we cannot be sure
+        if (depend_tx) {
+                enum submit_disposition s;
+
+                /* sanity check the dependency chain:
+                 * 1/ if ack is already set then we cannot be sure
                  * we are referring to the correct operation
+                 * 2/ dependencies are 1:1 i.e. two transactions can
+                 * not depend on the same parent
                  */
-                BUG_ON(depend_tx->ack);
+                BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
+                       tx->parent);
 
-                tx->parent = depend_tx;
+                /* the lock prevents async_tx_run_dependencies from missing
+                 * the setting of ->next when ->parent != NULL
+                 */
                 spin_lock_bh(&depend_tx->lock);
-                list_add_tail(&tx->depend_node, &depend_tx->depend_list);
-                if (depend_tx->cookie == 0) {
-                        struct dma_chan *dep_chan = depend_tx->chan;
-                        struct dma_device *dep_dev = dep_chan->device;
-                        dep_dev->device_dependency_added(dep_chan);
+                if (depend_tx->parent) {
+                        /* we have a parent so we can not submit directly
+                         * if we are staying on the same channel: append
+                         * else: channel switch
+                         */
+                        if (depend_tx->chan == chan) {
+                                tx->parent = depend_tx;
+                                depend_tx->next = tx;
+                                s = ASYNC_TX_SUBMITTED;
+                        } else
+                                s = ASYNC_TX_CHANNEL_SWITCH;
+                } else {
+                        /* we do not have a parent so we may be able to submit
+                         * directly if we are staying on the same channel
+                         */
+                        if (depend_tx->chan == chan)
+                                s = ASYNC_TX_DIRECT_SUBMIT;
+                        else
+                                s = ASYNC_TX_CHANNEL_SWITCH;
                 }
                 spin_unlock_bh(&depend_tx->lock);
 
-                /* schedule an interrupt to trigger the channel switch */
-                async_trigger_callback(ASYNC_TX_ACK, depend_tx, NULL, NULL);
+                switch (s) {
+                case ASYNC_TX_SUBMITTED:
+                        break;
+                case ASYNC_TX_CHANNEL_SWITCH:
+                        async_tx_channel_switch(depend_tx, tx);
+                        break;
+                case ASYNC_TX_DIRECT_SUBMIT:
+                        tx->parent = NULL;
+                        tx->tx_submit(tx);
+                        break;
+                }
         } else {
                 tx->parent = NULL;
                 tx->tx_submit(tx);
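Note the shape of the rewrite: under depend_tx->lock the function only records which of the three dispositions applies, and the actual submit or channel switch happens after the lock is dropped. As the submit_disposition kernel-doc above explains, submitting under the lock could deadlock against drivers that call async_tx_run_dependencies while already holding a channel lock. The decision reduces to this table; classify is a hypothetical helper reusing the enum names from the hunk above against the simplified struct:

enum submit_disposition {
        ASYNC_TX_SUBMITTED,       /* appended to depend_tx under the lock */
        ASYNC_TX_CHANNEL_SWITCH,  /* schedule a switch once the lock drops */
        ASYNC_TX_DIRECT_SUBMIT,   /* no parent, same channel: submit after unlock */
};

/* what async_tx_submit() decides while holding depend_tx->lock */
static enum submit_disposition classify(const struct tx_desc *depend_tx,
                                        const void *chan)
{
        if (depend_tx->parent)
                return depend_tx->chan == chan ?
                        ASYNC_TX_SUBMITTED : ASYNC_TX_CHANNEL_SWITCH;
        else
                return depend_tx->chan == chan ?
                        ASYNC_TX_DIRECT_SUBMIT : ASYNC_TX_CHANNEL_SWITCH;
}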
@@ -467,7 +595,7 @@ async_trigger_callback(enum async_tx_flags flags,
                 if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                         device = NULL;
 
-                tx = device ? device->device_prep_dma_interrupt(chan) : NULL;
+                tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
         } else
                 tx = NULL;
 
@@ -483,7 +611,7 @@ async_trigger_callback(enum async_tx_flags flags,
                 /* if ack is already set then we cannot be sure
                  * we are referring to the correct operation
                  */
-                BUG_ON(depend_tx->ack);
+                BUG_ON(async_tx_test_ack(depend_tx));
                 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                         panic("%s: DMA_ERROR waiting for depend_tx\n",
                               __func__);
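The two BUG_ON conversions track the companion dmaengine change that folded the old ->ack member into the descriptor's flags word, with async_tx_test_ack() testing the DMA_CTRL_ACK bit instead of reading a dedicated field; device_prep_dma_interrupt() likewise gains a flags argument, passed as 0 here. Roughly, the accessor pair follows this pattern; a sketch, not the verbatim header, with CTRL_ACK, flagged_tx, tx_ack, and tx_test_ack as illustrative stand-ins:

/* the ack state now lives in a flags word; sketch of the accessor pattern */
enum { CTRL_ACK = 1 << 0 };     /* stands in for the kernel's DMA_CTRL_ACK */

struct flagged_tx {
        unsigned long flags;
};

static void tx_ack(struct flagged_tx *tx)      /* cf. async_tx_ack() */
{
        tx->flags |= CTRL_ACK;
}

static int tx_test_ack(struct flagged_tx *tx)  /* cf. async_tx_test_ack() */
{
        return (tx->flags & CTRL_ACK) == CTRL_ACK;
}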