author		Dan Williams <dan.j.williams@intel.com>	2008-04-17 23:17:25 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2008-04-17 16:25:05 -0400
commit		19242d7233df7d658405d4b7ee1758d21414cfaa (patch)
tree		4bffa2700c30fdb454dfa150115a0607c6cf3d2a /drivers
parent		1c62979ed29a8e2bf9fbe1db101c81a0089676f8 (diff)
async_tx: fix multiple dependency submission
Shrink struct dma_async_tx_descriptor and introduce
async_tx_channel_switch to properly inject a channel switch interrupt in
the descriptor stream. This simplifies the locking model as drivers no
longer need to handle dma_async_tx_descriptor.lock.
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
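In practical terms for a driver, the completion path no longer brackets its work with spin_lock_bh(&desc->async_tx.lock) / spin_unlock_bh(); dependency submission is serialized by the channel lock the caller already holds, and the core injects any needed channel-switch interrupt via async_tx_channel_switch(). A condensed sketch of that post-patch pattern, using the iop-adma names that appear in the hunks below (the helper name iop_adma_complete_one is illustrative, not from the driver):

```c
/* Condensed, illustrative sketch of the post-patch completion path.
 * Field and function names are taken from the iop-adma hunks below;
 * the helper name iop_adma_complete_one is hypothetical. */
static dma_cookie_t
iop_adma_complete_one(struct iop_adma_desc_slot *desc,
		      struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	/* caller holds iop_chan->lock; async_tx.lock is no longer taken */
	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;
	}

	/* run dependent operations; the core handles the channel-switch
	 * interrupt via async_tx_channel_switch() when the next descriptor
	 * lives on a different channel */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}
```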
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/dmaengine.c	2
-rw-r--r--	drivers/dma/iop-adma.c	9
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8db0e7f9d3f4..9cb898a76bb3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -600,8 +600,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
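Applied, this hunk leaves the init routine as the minimal reconstruction below. The second parameter is cut off in the truncated hunk header, so struct dma_chan *chan is inferred from the tx->chan = chan assignment rather than read from this diff:

```c
/* Reconstruction of dma_async_tx_descriptor_init() after the two
 * INIT_LIST_HEAD() calls are removed.  The second parameter is not
 * visible in the hunk header; struct dma_chan *chan is inferred. */
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
```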
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f82b0906d466..21854cd7190f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -63,7 +63,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
 {
 	BUG_ON(desc->async_tx.cookie < 0);
-	spin_lock_bh(&desc->async_tx.lock);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
 		desc->async_tx.cookie = 0;
@@ -101,7 +100,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 
 	/* run dependent operations */
 	async_tx_run_dependencies(&desc->async_tx);
-	spin_unlock_bh(&desc->async_tx.lock);
 
 	return cookie;
 }
@@ -275,8 +273,11 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 
 static void iop_adma_tasklet(unsigned long data)
 {
-	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
-	__iop_adma_slot_cleanup(chan);
+	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+
+	spin_lock(&iop_chan->lock);
+	__iop_adma_slot_cleanup(iop_chan);
+	spin_unlock(&iop_chan->lock);
 }
 
 static struct iop_adma_desc_slot *
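For reference, the tasklet as it reads after this last hunk (reconstructed from the added lines above, not copied from the full file):

```c
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	/* slot cleanup is now serialized by the channel lock instead of
	 * the per-descriptor async_tx.lock */
	spin_lock(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}
```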