author    Leilei Zhao <leilei.zhao@atmel.com>    2015-04-07 05:45:10 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>    2015-04-08 10:20:04 -0400
commit    289b2623df34ebec4c25b7d31804b70fc90b92c6 (patch)
tree      6d45df75cece75529ccfd449a9987055e0abfaf6 /drivers/crypto
parent    8a10eb8d36ffc86512ae66a35c8888f07b8ec921 (diff)
crypto: atmel-aes - sync the buf used in DMA or CPU
The input buffer and output buffer are mapped for DMA transfer in the Atmel AES driver, but they are also used by the CPU when the requested crypt length is not bigger than the threshold value 16. The buffers are pulled into the cache when the CPU accesses them, and a dirty cache line can be written back to memory while a DMA transfer is in progress. Use dma_sync_single_for_device() and dma_sync_single_for_cpu() around the DMA transfers to ensure coherence, so that the CPU always sees the correct values. This fixes an issue where the encrypted result periodically went wrong during performance testing with OpenSSH.

Signed-off-by: Leilei Zhao <leilei.zhao@atmel.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
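The underlying pattern is the kernel's streaming-DMA ownership protocol: a buffer mapped with dma_map_single() is owned by the device, and the CPU may only touch it between a dma_sync_single_for_cpu() call and the matching dma_sync_single_for_device(). Below is a minimal sketch of one such cycle, not code from this driver; my_dev, buf, BUF_LEN and dma_sync_cycle are hypothetical placeholders, and only the dma_* calls are the real kernel API.

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/errno.h>

#define BUF_LEN 16	/* hypothetical buffer size (matches the CPU-path threshold) */

static int dma_sync_cycle(struct device *my_dev, void *buf)
{
	dma_addr_t dma_addr;

	/* Map once up front; from here on the device owns the buffer. */
	dma_addr = dma_map_single(my_dev, buf, BUF_LEN, DMA_TO_DEVICE);
	if (dma_mapping_error(my_dev, dma_addr))
		return -ENOMEM;

	/* Hand ownership back to the CPU before it reads or writes buf. */
	dma_sync_single_for_cpu(my_dev, dma_addr, BUF_LEN, DMA_TO_DEVICE);
	memset(buf, 0, BUF_LEN);	/* CPU fills the buffer */

	/* Return the buffer to the device before starting the transfer. */
	dma_sync_single_for_device(my_dev, dma_addr, BUF_LEN, DMA_TO_DEVICE);
	/* ... kick off the DMA transfer here ... */

	dma_unmap_single(my_dev, dma_addr, BUF_LEN, DMA_TO_DEVICE);
	return 0;
}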
Diffstat (limited to 'drivers/crypto')
-rw-r--r--    drivers/crypto/atmel-aes.c    16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 15c7dbd0a13d..fb760664d28f 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -315,10 +315,10 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
 
 	dd->dma_size = length;
 
-	if (!(dd->flags & AES_FLAGS_FAST)) {
-		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-					   DMA_TO_DEVICE);
-	}
+	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+				   DMA_TO_DEVICE);
+	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
+				   DMA_FROM_DEVICE);
 
 	if (dd->flags & AES_FLAGS_CFB8) {
 		dd->dma_lch_in.dma_conf.dst_addr_width =
@@ -391,6 +391,11 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
 {
 	dd->flags &= ~AES_FLAGS_DMA;
 
+	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
+				dd->dma_size, DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+				dd->dma_size, DMA_FROM_DEVICE);
+
 	/* use cache buffers */
 	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
 	if (!dd->nb_in_sg)
@@ -459,6 +464,9 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
 		dd->flags |= AES_FLAGS_FAST;
 
 	} else {
+		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
+					dd->dma_size, DMA_TO_DEVICE);
+
 		/* use cache buffers */
 		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
 			dd->buf_in, dd->buflen, dd->total, 0);