| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-21 20:05:46 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-21 20:05:46 -0400 |
| commit | 6f68fbaafbaa033205cd131d3e1f3c4b914e9b78 (patch) | |
| tree | 56b434496064ed170f94381e3ec4c6c340b71376 /crypto/async_tx | |
| parent | 6e4513972a5ad28517477d21f301a02ac7a0df76 (diff) | |
| parent | 0b28330e39bbe0ffee4c56b09fc415fcec595ea3 (diff) | |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
DMAENGINE: extend the control command to include an arg
async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
DMAENGINE: DMA40 fix for allocation of logical channel 0
DMAENGINE: DMA40 support paused channel status
dmaengine: mpc512x: Use resource_size
DMA ENGINE: Do not reset 'private' of channel
ioat: Remove duplicated devm_kzalloc() calls for ioatdma_device
ioat3: disable cacheline-unaligned transfers for raid operations
ioat2,3: convert to producer/consumer locking
ioat: convert to circ_buf
DMAENGINE: Support for ST-Ericssons DMA40 block v3
async_tx: use of kzalloc/kfree requires the include of slab.h
dmaengine: provide helper for setting txstate
DMAENGINE: generic channel status v2
DMAENGINE: generic slave control v2
dma: timb-dma: Update comment and fix compiler warning
dma: Add timb-dma
DMAENGINE: COH 901 318 fix bytesleft
DMAENGINE: COH 901 318 rename confusing vars
Diffstat (limited to 'crypto/async_tx')
-rw-r--r-- | crypto/async_tx/async_tx.c | 46
1 file changed, 19 insertions, 27 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f9cdf04fe7c0..7f2c00a45205 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	struct dma_device *device = chan->device;
 	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
-	BUG();
-#endif
-
 	/* first check to see if we can still append to depend_tx */
-	spin_lock_bh(&depend_tx->lock);
-	if (depend_tx->parent && depend_tx->chan == tx->chan) {
-		tx->parent = depend_tx;
-		depend_tx->next = tx;
+	txd_lock(depend_tx);
+	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
+		txd_chain(depend_tx, tx);
 		intr_tx = NULL;
 	}
-	spin_unlock_bh(&depend_tx->lock);
+	txd_unlock(depend_tx);
 
 	/* attached dependency, flush the parent channel */
 	if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	if (intr_tx) {
 		intr_tx->callback = NULL;
 		intr_tx->callback_param = NULL;
-		tx->parent = intr_tx;
-		/* safe to set ->next outside the lock since we know we are
+		/* safe to chain outside the lock since we know we are
 		 * not submitted yet
 		 */
-		intr_tx->next = tx;
+		txd_chain(intr_tx, tx);
 
 		/* check if we need to append */
-		spin_lock_bh(&depend_tx->lock);
-		if (depend_tx->parent) {
-			intr_tx->parent = depend_tx;
-			depend_tx->next = intr_tx;
+		txd_lock(depend_tx);
+		if (txd_parent(depend_tx)) {
+			txd_chain(depend_tx, intr_tx);
 			async_tx_ack(intr_tx);
 			intr_tx = NULL;
 		}
-		spin_unlock_bh(&depend_tx->lock);
+		txd_unlock(depend_tx);
 
 		if (intr_tx) {
-			intr_tx->parent = NULL;
+			txd_clear_parent(intr_tx);
 			intr_tx->tx_submit(intr_tx);
 			async_tx_ack(intr_tx);
 		}
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		 * 2/ dependencies are 1:1 i.e. two transactions can
 		 * not depend on the same parent
 		 */
-		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
-		       tx->parent);
+		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
+		       txd_parent(tx));
 
 		/* the lock prevents async_tx_run_dependencies from missing
 		 * the setting of ->next when ->parent != NULL
 		 */
-		spin_lock_bh(&depend_tx->lock);
-		if (depend_tx->parent) {
+		txd_lock(depend_tx);
+		if (txd_parent(depend_tx)) {
 			/* we have a parent so we can not submit directly
 			 * if we are staying on the same channel: append
 			 * else: channel switch
 			 */
 			if (depend_tx->chan == chan) {
-				tx->parent = depend_tx;
-				depend_tx->next = tx;
+				txd_chain(depend_tx, tx);
 				s = ASYNC_TX_SUBMITTED;
 			} else
 				s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 			else
 				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
-		spin_unlock_bh(&depend_tx->lock);
+		txd_unlock(depend_tx);
 
 		switch (s) {
 		case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 			async_tx_channel_switch(depend_tx, tx);
 			break;
 		case ASYNC_TX_DIRECT_SUBMIT:
-			tx->parent = NULL;
+			txd_clear_parent(tx);
 			tx->tx_submit(tx);
 			break;
 		}
 	} else {
-		tx->parent = NULL;
+		txd_clear_parent(tx);
 		tx->tx_submit(tx);
 	}
 
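The substance of this diff is that async_tx no longer touches depend_tx->lock, ->parent and ->next directly; it goes through the txd_*() accessors introduced by the "async_tx: trim dma_async_tx_descriptor in 'no channel switch' case" commit merged here, so the dependency fields and the locking around them can be trimmed from struct dma_async_tx_descriptor when channel switching is disabled. That is presumably also why the open-coded #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH / BUG() guard at the top of async_tx_channel_switch() can be dropped from this file. Read purely from the substitutions above, the channel-switch-enabled variants of the helpers behave roughly like the sketch below; this is an illustration inferred from the hunks, not the authoritative definitions (which would live alongside struct dma_async_tx_descriptor in include/linux/dmaengine.h).

/* Illustrative sketch only: inferred from the replacements in the hunks
 * above, not copied from include/linux/dmaengine.h.  Assumes the
 * channel-switch-enabled layout of struct dma_async_tx_descriptor,
 * i.e. it still has ->lock, ->parent and ->next; the 'no channel
 * switch' build can reduce these helpers to stubs instead.
 */
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}

static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}

/* txd_chain(txd, next): make 'next' depend on 'txd', replacing the old
 * open-coded pair "next->parent = txd; txd->next = next;".
 */
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}

static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}

static inline struct dma_async_tx_descriptor *
txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}

static inline struct dma_async_tx_descriptor *
txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}

With this indirection in place, a caller such as async_tx_submit() reads the same whether or not channel switching is compiled in; only the helper bodies change, which is what lets the descriptor shrink in the 'no channel switch' case.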