Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                       |  2
-rw-r--r--  crypto/async_tx/async_raid6_recov.c  | 21
-rw-r--r--  crypto/async_tx/async_tx.c           | 46
3 files changed, 33 insertions, 36 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 403857ad06d4..9d9434f08c92 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -28,7 +28,7 @@ config CRYPTO_FIPS
 	  This options enables the fips boot option which is
 	  required if you want to system to operate in a FIPS 200
 	  certification. You should say no unless you know what
-	  this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+	  this is. Note that CRYPTO_ANSI_CPRNG is required if this
 	  option is selected
 
 config CRYPTO_ALGAPI
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 943f2abac9b4..ce038d861eb9 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	void *scribble = submit->scribble;
 	int non_zero_srcs, i;
 
 	BUG_ON(faila == failb);
@@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!submit->scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
@@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
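
Both async_raid6_recov.c hunks apply the same pattern: the synchronous fallback now fires either when no DMA_PQ channel can be found or when no scribble buffer was supplied, and when a scribble buffer is present it is used as the pointer workspace so the caller's 'blocks' array is left untouched. The standalone sketch below (plain userspace C with a hypothetical helper name, not the kernel code itself) only illustrates that workspace-selection decision:

#include <stdio.h>
#include <stdlib.h>

struct page;    /* opaque placeholder; the real kernel type is irrelevant here */

/* pick_ptrs() is a hypothetical helper for illustration only.
 * It chooses the pointer workspace for the synchronous recovery path:
 * prefer the caller-supplied scribble buffer so 'blocks' is preserved,
 * otherwise fall back to reusing 'blocks' itself (which may alter it).
 */
static void **pick_ptrs(void *scribble, struct page **blocks)
{
	return scribble ? scribble : (void **) blocks;
}

int main(void)
{
	struct page *blocks[4] = { NULL, NULL, NULL, NULL };
	void *scribble = malloc(4 * sizeof(void *));

	void **ptrs = pick_ptrs(scribble, blocks);
	printf("with scribble:    workspace %s 'blocks'\n",
	       (void *) ptrs == (void *) blocks ? "aliases" : "is separate from");

	ptrs = pick_ptrs(NULL, blocks);
	printf("without scribble: workspace %s 'blocks'\n",
	       (void *) ptrs == (void *) blocks ? "aliases" : "is separate from");

	free(scribble);
	return 0;
}

The trade-off matches the rewritten comment above: the no-scribble path may clobber 'blocks', so the scribble buffer is preferred whenever it exists in order to keep the caller's array intact.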
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f9cdf04fe7c0..7f2c00a45205 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	struct dma_device *device = chan->device;
 	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
-	BUG();
-#endif
-
 	/* first check to see if we can still append to depend_tx */
-	spin_lock_bh(&depend_tx->lock);
-	if (depend_tx->parent && depend_tx->chan == tx->chan) {
-		tx->parent = depend_tx;
-		depend_tx->next = tx;
+	txd_lock(depend_tx);
+	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
+		txd_chain(depend_tx, tx);
 		intr_tx = NULL;
 	}
-	spin_unlock_bh(&depend_tx->lock);
+	txd_unlock(depend_tx);
 
 	/* attached dependency, flush the parent channel */
 	if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	if (intr_tx) {
 		intr_tx->callback = NULL;
 		intr_tx->callback_param = NULL;
-		tx->parent = intr_tx;
-		/* safe to set ->next outside the lock since we know we are
+		/* safe to chain outside the lock since we know we are
 		 * not submitted yet
 		 */
-		intr_tx->next = tx;
+		txd_chain(intr_tx, tx);
 
 		/* check if we need to append */
-		spin_lock_bh(&depend_tx->lock);
-		if (depend_tx->parent) {
-			intr_tx->parent = depend_tx;
-			depend_tx->next = intr_tx;
+		txd_lock(depend_tx);
+		if (txd_parent(depend_tx)) {
+			txd_chain(depend_tx, intr_tx);
 			async_tx_ack(intr_tx);
 			intr_tx = NULL;
 		}
-		spin_unlock_bh(&depend_tx->lock);
+		txd_unlock(depend_tx);
 
 		if (intr_tx) {
-			intr_tx->parent = NULL;
+			txd_clear_parent(intr_tx);
 			intr_tx->tx_submit(intr_tx);
 			async_tx_ack(intr_tx);
 		}
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		 * 2/ dependencies are 1:1 i.e. two transactions can
 		 *    not depend on the same parent
 		 */
-		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
-		       tx->parent);
+		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
+		       txd_parent(tx));
 
 		/* the lock prevents async_tx_run_dependencies from missing
 		 * the setting of ->next when ->parent != NULL
 		 */
-		spin_lock_bh(&depend_tx->lock);
-		if (depend_tx->parent) {
+		txd_lock(depend_tx);
+		if (txd_parent(depend_tx)) {
 			/* we have a parent so we can not submit directly
 			 * if we are staying on the same channel: append
 			 * else: channel switch
 			 */
 			if (depend_tx->chan == chan) {
-				tx->parent = depend_tx;
-				depend_tx->next = tx;
+				txd_chain(depend_tx, tx);
 				s = ASYNC_TX_SUBMITTED;
 			} else
 				s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 			else
 				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
-		spin_unlock_bh(&depend_tx->lock);
+		txd_unlock(depend_tx);
 
 		switch (s) {
 		case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 			async_tx_channel_switch(depend_tx, tx);
 			break;
 		case ASYNC_TX_DIRECT_SUBMIT:
-			tx->parent = NULL;
+			txd_clear_parent(tx);
 			tx->tx_submit(tx);
 			break;
 		}
 	} else {
-		tx->parent = NULL;
+		txd_clear_parent(tx);
 		tx->tx_submit(tx);
 	}
 
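
A note on the async_tx.c hunks: every substitution is mechanical. spin_lock_bh(&depend_tx->lock)/spin_unlock_bh() become txd_lock()/txd_unlock(), the paired ->parent/->next assignments become txd_chain(), the ->parent and ->next reads become txd_parent()/txd_next(), and ->parent = NULL becomes txd_clear_parent(); the open-coded BUG() for CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH is dropped here, presumably because that concern now lives behind the same accessors. The sketch below uses simplified stand-in types (a pthread mutex in place of the descriptor spinlock) to show the mapping implied by these one-for-one replacements; it is an illustration under those assumptions, not the dmaengine header definitions:

#include <pthread.h>
#include <stddef.h>

/* Simplified stand-in for struct dma_async_tx_descriptor: only the
 * fields the accessors below touch, for illustration.
 */
struct txd {
	pthread_mutex_t lock;
	struct txd *parent;
	struct txd *next;
};

/* spin_lock_bh(&txd->lock)   ->  txd_lock(txd)   */
static void txd_lock(struct txd *txd)    { pthread_mutex_lock(&txd->lock); }
/* spin_unlock_bh(&txd->lock) ->  txd_unlock(txd) */
static void txd_unlock(struct txd *txd)  { pthread_mutex_unlock(&txd->lock); }

/* "tx->parent = depend_tx; depend_tx->next = tx;"  ->  txd_chain(depend_tx, tx) */
static void txd_chain(struct txd *parent, struct txd *child)
{
	parent->next = child;
	child->parent = parent;
}

/* ->parent / ->next reads  ->  txd_parent() / txd_next() */
static struct txd *txd_parent(struct txd *txd) { return txd->parent; }
static struct txd *txd_next(struct txd *txd)   { return txd->next; }

/* "tx->parent = NULL"  ->  txd_clear_parent(tx) */
static void txd_clear_parent(struct txd *txd)  { txd->parent = NULL; }

int main(void)
{
	struct txd a = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
	struct txd b = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

	/* mirrors the append path: link b as a dependent of a under a's lock */
	txd_lock(&a);
	txd_chain(&a, &b);
	txd_unlock(&a);

	/* mirrors the direct-submit path, which detaches the parent first */
	txd_clear_parent(&b);

	return (txd_next(&a) == &b && txd_parent(&b) == NULL) ? 0 : 1;
}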