author	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:14 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:14 -0500
commit	6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1 (patch)
tree	afba24357d1f4ff69ccb2b39a19542546590a50b /crypto
parent	07f2211e4fbce6990722d78c4f04225da9c0e9cf (diff)
dmaengine: up-level reference counting to the module level
Simply, if a client wants any dmaengine channel then prevent all dmaengine modules from being removed. Once the clients are done, re-enable module removal.

Why? Beyond reducing complication:
1/ Tracking reference counts per-transaction in an efficient manner, as is currently done, requires a complicated scheme to avoid cache-line bouncing effects.
2/ Per-transaction ref-counting gives the false impression that a dma-driver can be gracefully removed ahead of its user (net, md, or dma-slave).
3/ None of the in-tree dma-drivers talk to hot-pluggable hardware, but if such an engine were built one day we still would not need to notify clients of remove events. The driver can simply return NULL to a ->prep() request, something that is much easier for a client to handle.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
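To make the new contract concrete, here is a minimal sketch of a dmaengine client under this model: one module-level reference held for the client's whole lifetime via dmaengine_get()/dmaengine_put() (added by this patch in the dmaengine core, outside the crypto-only diffstat below), plus a CPU fallback whenever a channel's ->prep() hook returns NULL. The my_client_* names are illustrative assumptions, not code from this patch:

#include <linux/dmaengine.h>

/* Illustrative sketch, not part of this patch: take one module-level
 * reference for the client's lifetime instead of per-transaction
 * dma_chan_get()/dma_chan_put() calls.
 */
static void my_client_init(void)
{
	dmaengine_get();	/* pin all dmaengine modules while we run */
}

static dma_cookie_t my_client_copy(struct dma_chan *chan,
				   dma_addr_t dst, dma_addr_t src,
				   size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENXIO;	/* channel unavailable: caller does a CPU copy */

	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);
	return cookie;
}

static void my_client_exit(void)
{
	dmaengine_put();	/* allow dmaengine modules to unload again */
}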
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_tx.c	4
1 file changed, 0 insertions, 4 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 8cfac182165d..43fe4cbe71e6 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -198,8 +198,6 @@ dma_channel_add_remove(struct dma_client *client,
 		/* add the channel to the generic management list */
 		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
 		if (master_ref) {
-			/* keep a reference until async_tx is unloaded */
-			dma_chan_get(chan);
 			init_dma_chan_ref(master_ref, chan);
 			spin_lock_irqsave(&async_tx_lock, flags);
 			list_add_tail_rcu(&master_ref->node,
@@ -221,8 +219,6 @@ dma_channel_add_remove(struct dma_client *client,
 		spin_lock_irqsave(&async_tx_lock, flags);
 		list_for_each_entry(ref, &async_tx_master_list, node)
 			if (ref->chan == chan) {
-				/* permit backing devices to go away */
-				dma_chan_put(ref->chan);
 				list_del_rcu(&ref->node);
 				call_rcu(&ref->rcu, free_dma_chan_ref);
 				found = 1;