author     Dan Williams <dan.j.williams@intel.com>    2009-07-14 15:19:02 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2009-08-29 22:09:27 -0400
commit     95475e57113c66aac7583925736ed2e2d58c990d
tree       933aa0ca3bffef5b1457c516fbe3e8690b4c4cb1 /crypto/async_tx
parent     af1f951eb6ef27b01cbfb3f6c21b770af4368a6d
async_tx: remove walk of tx->parent chain in dma_wait_for_async_tx
We currently walk the parent chain when waiting for a given tx to
complete; however, this walk may race with the driver cleanup routine.
The routines in async_raid6_recov.c may fall back to the synchronous
path at any point, so we need to be prepared to call async_tx_quiesce()
(which calls dma_wait_for_async_tx). To remove the ->parent walk, we
guarantee that ->issue_pending() is invoked every time a dependency is
attached; then we can simply poll the initial descriptor until
completion.
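
For illustration, with the issue-pending guarantee in place the wait
routine no longer needs to chase ->parent pointers: it only has to spin
until the descriptor it was handed has been submitted, then wait on that
one cookie. A minimal sketch (the timeout value and error handling here
are illustrative, not part of this patch):

enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        /* illustrative cap on waiting for descriptor submission */
        unsigned long timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        /* poll only the descriptor we were handed; no ->parent walk */
        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, timeout))
                        return DMA_ERROR;
                cpu_relax();
        }

        /* descriptor is on hardware, wait for its cookie to complete */
        return dma_sync_wait(tx->chan, tx->cookie);
}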
This also allows for a lighter-weight 'issue pending' implementation,
as there is no longer a requirement to iterate through all the channels'
->issue_pending() routines as long as operations have been submitted in
an ordered chain. async_tx_issue_pending() is added for this case.
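
A sketch of what such a helper can look like (modeled on the async_tx
API; the body assumes ordered submission as described above):

static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
        /* flush only the channel that holds @tx; any cross-channel
         * dependency was already issued when it was attached
         */
        if (likely(tx)) {
                struct dma_chan *chan = tx->chan;
                struct dma_device *dma = chan->device;

                dma->device_issue_pending(chan);
        }
}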
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'crypto/async_tx')
 crypto/async_tx/async_tx.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 6e37ad3f4417..60615fedcf5e 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -77,8 +77,8 @@ static void
 async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                        struct dma_async_tx_descriptor *tx)
 {
-        struct dma_chan *chan;
-        struct dma_device *device;
+        struct dma_chan *chan = depend_tx->chan;
+        struct dma_device *device = chan->device;
         struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
         /* first check to see if we can still append to depend_tx */
@@ -90,11 +90,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
         }
         spin_unlock_bh(&depend_tx->lock);
 
-        if (!intr_tx)
+        /* attached dependency, flush the parent channel */
+        if (!intr_tx) {
+                device->device_issue_pending(chan);
                 return;
-
-        chan = depend_tx->chan;
-        device = chan->device;
+        }
 
         /* see if we can schedule an interrupt
          * otherwise poll for completion
@@ -128,6 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                         intr_tx->tx_submit(intr_tx);
                         async_tx_ack(intr_tx);
                 }
+                device->device_issue_pending(chan);
         } else {
                 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                         panic("%s: DMA_ERROR waiting for depend_tx\n",