author | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 20:42:51 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 20:42:51 -0400 |
commit | 138f4c359d23d2ec38d18bd70dd9613ae515fe93 (patch) | |
tree | ad7fafba6eac74d9d92ade839a65171466d67a70 /crypto/async_tx | |
parent | 0403e3827788d878163f9ef0541b748b0f88ca5d (diff) | |
dmaengine, async_tx: add a "no channel switch" allocator
Channel switching is problematic for some dmaengine drivers whose architecture precludes separating ->prep from ->submit. In these cases the driver can select ASYNC_TX_DISABLE_CHANNEL_SWITCH so that the async_tx allocator only returns channels that support all of the required asynchronous operations.
For example, MD_RAID456=y selects support for asynchronous xor, xor validate, pq, pq validate, and memcpy. When ASYNC_TX_DISABLE_CHANNEL_SWITCH=y, any channel with all of these capabilities is marked DMA_ASYNC_TX, allowing async_tx_find_channel() to quickly locate compatible channels with the guarantee that dependency chains will remain on one channel. When ASYNC_TX_DISABLE_CHANNEL_SWITCH=n, async_tx_find_channel() may select channels that lead to operation chains needing to cross channel boundaries via the async_tx channel-switch capability.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
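To make the capability gate concrete, here is a minimal sketch of the idea described above. It is an illustration only, not the allocator code this series adds on the dmaengine side (which is outside this diffstat): it assumes the existing dmaengine helpers dma_has_cap()/dma_cap_set(), the device->cap_mask field, and the DMA_XOR_VAL/DMA_PQ_VAL/DMA_ASYNC_TX capability names of this era; the helpers channel_covers_async_tx() and mark_async_tx_capable() are hypothetical.

```c
/*
 * Sketch only: illustrates the capability test described in the commit
 * message, not the actual allocator logic on the dmaengine side.
 */
#include <linux/dmaengine.h>

static bool channel_covers_async_tx(struct dma_device *device)
{
	/* every operation async_tx may chain must be native to this device */
	return dma_has_cap(DMA_MEMCPY,  device->cap_mask) &&
	       dma_has_cap(DMA_XOR,     device->cap_mask) &&
	       dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
	       dma_has_cap(DMA_PQ,      device->cap_mask) &&
	       dma_has_cap(DMA_PQ_VAL,  device->cap_mask);
}

static void mark_async_tx_capable(struct dma_device *device)
{
	/*
	 * With ASYNC_TX_DISABLE_CHANNEL_SWITCH=y, only devices that pass the
	 * test above are advertised to async_tx_find_channel(), so a
	 * dependency chain never has to hop to a second channel.
	 */
	if (channel_covers_async_tx(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
}
```

With channels filtered this way, async_tx_find_channel() can hand an entire dependency chain to a single DMA_ASYNC_TX-capable channel instead of stitching operations across devices.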
Diffstat (limited to 'crypto/async_tx')
-rw-r--r-- | crypto/async_tx/async_tx.c | 4 |
1 file changed, 4 insertions, 0 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 60615fedcf5e..f9cdf04fe7c0 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,6 +81,10 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	struct dma_device *device = chan->device;
 	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	BUG();
+#endif
+
 	/* first check to see if we can still append to depend_tx */
 	spin_lock_bh(&depend_tx->lock);
 	if (depend_tx->parent && depend_tx->chan == tx->chan) {
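For context on why a plain BUG() is an appropriate guard here, the sketch below shows a simplified caller-side decision. It is not the real async_tx_submit() logic, which is more involved; it only illustrates why the channel-switch path becomes unreachable once the allocator guarantees that a dependency chain stays on one channel. The function name sketch_submit() is hypothetical.

```c
/*
 * Simplified, hypothetical sketch -- not the real submit path in
 * crypto/async_tx/async_tx.c.  It only illustrates why
 * async_tx_channel_switch() is never reached when every descriptor in a
 * dependency chain comes from the same DMA_ASYNC_TX-capable channel.
 */
static void sketch_submit(struct dma_chan *chan,
			  struct dma_async_tx_descriptor *tx,
			  struct dma_async_tx_descriptor *depend_tx)
{
	if (depend_tx && depend_tx->chan != chan) {
		/*
		 * Cross-channel dependency: only possible when
		 * ASYNC_TX_DISABLE_CHANNEL_SWITCH=n, in which case an
		 * interrupt descriptor bridges the two channels.
		 */
		async_tx_channel_switch(depend_tx, tx);
		return;
	}

	/* same channel (or no dependency): submit directly, in order */
	tx->tx_submit(tx);
}
```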