author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-08 23:57:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-08 23:57:08 -0400
commit     4d2fa8b44b891f0da5ceda3e5a1402ccf0ab6f26 (patch)
tree       cbb763ec5e74cfbaac6ce53df277883cb78a8a1a /drivers/crypto/inside-secure
parent     8b68150883ca466a23e90902dd4113b22e692f04 (diff)
parent     f3880a23564e3172437285ebcb5b8a124539fdae (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...
Diffstat (limited to 'drivers/crypto/inside-secure')
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c          13
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h          17
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c  116
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c     92
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c      3
5 files changed, 157 insertions, 84 deletions
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 86c699c14f84..df43a2c6933b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 
 	/* Processing Engine configuration */
 
+	/* Token & context configuration */
+	val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
+	      EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
+	      EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
+	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
+
 	/* H/W capabilities selection */
 	val = EIP197_FUNCTION_RSVD;
 	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 	if (rdesc->result_data.error_code & 0x407f) {
 		/* Fatal error (bits 0-7, 14) */
 		dev_err(priv->dev,
-			"cipher: result: result descriptor error (%d)\n",
+			"cipher: result: result descriptor error (0x%x)\n",
 			rdesc->result_data.error_code);
-		return -EIO;
+		return -EINVAL;
 	} else if (rdesc->result_data.error_code == BIT(9)) {
 		/* Authentication failed */
 		return -EBADMSG;
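
A quick side note on the two changes in this hunk: printing the error code with 0x%x makes the bitmask readable, and a malformed result descriptor is now reported as -EINVAL rather than -EIO. A minimal standalone sketch of the mapping, using only the facts visible above (0x407f covers the fatal bits 0-7 and 14, BIT(9) flags a failed authentication); the helper name is hypothetical:

    #include <errno.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* Hypothetical mirror of the check above, outside the driver. */
    static int decode_result_error(uint32_t error_code)
    {
            if (error_code & 0x407f)
                    return -EINVAL;  /* fatal error (bits 0-7, 14) */
            if (error_code == BIT(9))
                    return -EBADMSG; /* authentication failed */
            return 0;
    }
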
@@ -720,11 +726,10 @@ handle_results:
 	}
 
 acknowledge:
-	if (i) {
+	if (i)
 		writel(EIP197_xDR_PROC_xD_PKT(i) |
 		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
 		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
-	}
 
 	/* If the number of requests overflowed the counter, try to proceed more
 	 * requests.
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 65624a81f0fd..e0c202f33674 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -118,6 +118,7 @@
 #define EIP197_PE_ICE_SCRATCH_CTRL(n)		(0x0d04 + (0x2000 * (n)))
 #define EIP197_PE_ICE_FPP_CTRL(n)		(0x0d80 + (0x2000 * (n)))
 #define EIP197_PE_ICE_RAM_CTRL(n)		(0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL(n)		(0x1000 + (0x2000 * (n)))
 #define EIP197_PE_EIP96_FUNCTION_EN(n)		(0x1004 + (0x2000 * (n)))
 #define EIP197_PE_EIP96_CONTEXT_CTRL(n)		(0x1008 + (0x2000 * (n)))
 #define EIP197_PE_EIP96_CONTEXT_STAT(n)		(0x100c + (0x2000 * (n)))
@@ -249,6 +250,11 @@
 #define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN	BIT(0)
 #define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN	BIT(1)
 
+/* EIP197_PE_EIP96_TOKEN_CTRL */
+#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES		BIT(16)
+#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX		BIT(19)
+#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX	BIT(20)
+
 /* EIP197_PE_EIP96_FUNCTION_EN */
 #define EIP197_FUNCTION_RSVD			(BIT(6) | BIT(15) | BIT(20) | BIT(23))
 #define EIP197_PROTOCOL_HASH_ONLY		BIT(0)
@@ -333,6 +339,7 @@ struct safexcel_context_record {
 #define CONTEXT_CONTROL_IV3			BIT(8)
 #define CONTEXT_CONTROL_DIGEST_CNT		BIT(9)
 #define CONTEXT_CONTROL_COUNTER_MODE		BIT(10)
+#define CONTEXT_CONTROL_CRYPTO_STORE		BIT(12)
 #define CONTEXT_CONTROL_HASH_STORE		BIT(19)
 
 /* The hash counter given to the engine in the context has a granularity of
@@ -425,6 +432,10 @@ struct safexcel_token {
 
 #define EIP197_TOKEN_HASH_RESULT_VERIFY		BIT(16)
 
+#define EIP197_TOKEN_CTX_OFFSET(x)		(x)
+#define EIP197_TOKEN_DIRECTION_EXTERNAL		BIT(11)
+#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL		(0x1 << 12)
+
 #define EIP197_TOKEN_STAT_LAST_HASH		BIT(0)
 #define EIP197_TOKEN_STAT_LAST_PACKET		BIT(1)
 #define EIP197_TOKEN_OPCODE_DIRECTION		0x0
@@ -432,6 +443,7 @@ struct safexcel_token {
 #define EIP197_TOKEN_OPCODE_NOOP		EIP197_TOKEN_OPCODE_INSERT
 #define EIP197_TOKEN_OPCODE_RETRIEVE		0x4
 #define EIP197_TOKEN_OPCODE_VERIFY		0xd
+#define EIP197_TOKEN_OPCODE_CTX_ACCESS		0xe
 #define EIP197_TOKEN_OPCODE_BYPASS		GENMASK(3, 0)
 
 static inline void eip197_noop_token(struct safexcel_token *token)
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
 
 /* Instructions */
 #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST	0x1c
+#define EIP197_TOKEN_INS_ORIGIN_IV0		0x14
+#define EIP197_TOKEN_INS_ORIGIN_LEN(x)		((x) << 5)
 #define EIP197_TOKEN_INS_TYPE_OUTPUT		BIT(5)
 #define EIP197_TOKEN_INS_TYPE_HASH		BIT(6)
 #define EIP197_TOKEN_INS_TYPE_CRYTO		BIT(7)
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc {
 
 #define EIP197_OPTION_MAGIC_VALUE		BIT(0)
 #define EIP197_OPTION_64BIT_CTX		BIT(1)
+#define EIP197_OPTION_RC_AUTO			(0x2 << 3)
 #define EIP197_OPTION_CTX_CTRL_IN_CMD		BIT(8)
 #define EIP197_OPTION_2_TOKEN_IV_CMD		GENMASK(11, 10)
 #define EIP197_OPTION_4_TOKEN_IV_CMD		GENMASK(11, 9)
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state {
 	u32 digest;
 
 	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
-	u8 cache[SHA512_BLOCK_SIZE];
+	u8 cache[SHA512_BLOCK_SIZE << 1];
 };
 
 /*
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index de4be10b172f..8cdbdbe35681 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
 
 struct safexcel_cipher_req {
 	enum safexcel_cipher_direction direction;
+	/* Number of result descriptors associated to the request */
+	unsigned int rdescs;
 	bool needs_inv;
 };
 
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
 				    u32 length)
 {
 	struct safexcel_token *token;
-	unsigned offset = 0;
+	u32 offset = 0, block_sz = 0;
 
 	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
 		switch (ctx->alg) {
 		case SAFEXCEL_DES:
-			offset = DES_BLOCK_SIZE / sizeof(u32);
-			memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+			block_sz = DES_BLOCK_SIZE;
 			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
 			break;
 		case SAFEXCEL_3DES:
-			offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
-			memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+			block_sz = DES3_EDE_BLOCK_SIZE;
 			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
 			break;
-
 		case SAFEXCEL_AES:
-			offset = AES_BLOCK_SIZE / sizeof(u32);
-			memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+			block_sz = AES_BLOCK_SIZE;
 			cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
 			break;
 		}
+
+		offset = block_sz / sizeof(u32);
+		memcpy(cdesc->control_data.token, iv, block_sz);
 	}
 
 	token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
 	token[0].instructions = EIP197_TOKEN_INS_LAST |
 				EIP197_TOKEN_INS_TYPE_CRYTO |
 				EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+		u32 last = (EIP197_MAX_TOKENS - 1) - offset;
+
+		token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
+		token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
+					    EIP197_TOKEN_EXEC_IF_SUCCESSFUL |
+					    EIP197_TOKEN_CTX_OFFSET(0x2);
+		token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				   EIP197_TOKEN_STAT_LAST_PACKET;
+		token[last].instructions =
+			EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
+			EIP197_TOKEN_INS_ORIGIN_IV0;
+
+		/* Store the updated IV values back in the internal context
+		 * registers.
+		 */
+		cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
+	}
 }
 
 static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
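
As a worked example of the encoding above, the following standalone program builds the packet_length and instructions words of the IV write-back token for an AES-sized (16-byte) IV. The macro values are copied from the safexcel.h hunk earlier on this page; the interpretation in the comments is inferred from the macro names, not from the engine datasheet:

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the safexcel.h hunk above */
    #define EIP197_TOKEN_CTX_OFFSET(x)        (x)
    #define EIP197_TOKEN_DIRECTION_EXTERNAL   (1u << 11)
    #define EIP197_TOKEN_EXEC_IF_SUCCESSFUL   (0x1 << 12)
    #define EIP197_TOKEN_INS_ORIGIN_IV0       0x14
    #define EIP197_TOKEN_INS_ORIGIN_LEN(x)    ((x) << 5)

    int main(void)
    {
            uint32_t block_sz = 16; /* AES_BLOCK_SIZE */

            /* Built as in safexcel_skcipher_token(): store the final IV
             * to the context record (word 2 onwards), only if the
             * operation succeeded, taking block_sz / 4 words from the
             * IV registers.
             */
            uint32_t pkt = EIP197_TOKEN_DIRECTION_EXTERNAL |
                           EIP197_TOKEN_EXEC_IF_SUCCESSFUL |
                           EIP197_TOKEN_CTX_OFFSET(0x2);
            uint32_t ins = EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / 4) |
                           EIP197_TOKEN_INS_ORIGIN_IV0;

            printf("packet_length=0x%x instructions=0x%x\n", pkt, ins);
            return 0;
    }
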
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 
 	*ret = 0;
 
-	do {
+	if (unlikely(!sreq->rdescs))
+		return 0;
+
+	while (sreq->rdescs--) {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
 			dev_err(priv->dev,
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		*ret = safexcel_rdesc_check_errors(priv, rdesc);
 
 		ndesc++;
-	} while (!rdesc->last_seg);
+	}
 
 	safexcel_complete(priv, ring);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src,
-			     sg_nents_for_len(src, cryptlen),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src,
-			     sg_nents_for_len(src, cryptlen),
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst,
-			     sg_nents_for_len(dst, cryptlen),
-			     DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
 	}
 
 	*should_complete = true;
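
This hunk pairs with the send-path changes further down (sreq->rdescs = *results in safexcel_skcipher_send() and safexcel_aead_send()): the completion path now consumes exactly the number of result descriptors recorded at send time, rather than walking until a descriptor reports last_seg, which is more robust when the engine returns unexpected descriptors. A minimal sketch of the pattern, with the ring access abstracted behind a hypothetical fetch_rdesc() callback:

    #include <stddef.h>

    struct cipher_req_state {
            unsigned int rdescs; /* result descriptors queued at send time */
    };

    /* Consume exactly the recorded number of descriptors; a missing
     * last_seg flag can no longer make the loop overrun the request.
     */
    static int consume_results(struct cipher_req_state *sreq,
                               void *(*fetch_rdesc)(void))
    {
            int ndesc = 0;

            if (!sreq->rdescs)
                    return 0;

            while (sreq->rdescs--) {
                    if (!fetch_rdesc())
                            break; /* ring out of sync, stop early */
                    ndesc++;
            }

            return ndesc;
    }
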
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	int i, ret = 0;
 
 	if (src == dst) {
-		nr_src = dma_map_sg(priv->dev, src,
-				    sg_nents_for_len(src, totlen),
+		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
 				    DMA_BIDIRECTIONAL);
 		nr_dst = nr_src;
 		if (!nr_src)
 			return -EINVAL;
 	} else {
-		nr_src = dma_map_sg(priv->dev, src,
-				    sg_nents_for_len(src, totlen),
+		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
 				    DMA_TO_DEVICE);
 		if (!nr_src)
 			return -EINVAL;
 
-		nr_dst = dma_map_sg(priv->dev, dst,
-				    sg_nents_for_len(dst, totlen),
+		nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
 				    DMA_FROM_DEVICE);
 		if (!nr_dst) {
-			dma_unmap_sg(priv->dev, src,
-				     sg_nents_for_len(src, totlen),
-				     DMA_TO_DEVICE);
+			dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
 			return -EINVAL;
 		}
 	}
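
All the sg_nents_for_len() conversions in this file follow the same shape: map the whole scatterlist with sg_nents(), remember the count dma_map_sg() returned, and hand that same count back to dma_unmap_sg() on the unwind and completion paths. A condensed sketch of the pairing (kernel context assumed; this mirrors the hunks above rather than prescribing the DMA API in general):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Map the full scatterlist and record the mapped element count. */
    static int map_for_engine(struct device *dev, struct scatterlist *src,
                              int *nr_src)
    {
            *nr_src = dma_map_sg(dev, src, sg_nents(src), DMA_TO_DEVICE);
            return *nr_src ? 0 : -EINVAL;
    }

    /* Unmap with the count recorded at map time, not a byte-derived one. */
    static void unmap_for_engine(struct device *dev, struct scatterlist *src,
                                 int nr_src)
    {
            dma_unmap_sg(dev, src, nr_src, DMA_TO_DEVICE);
    }
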
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 
 	/* result descriptors */
 	for_each_sg(dst, sg, nr_dst, i) {
-		bool first = !i, last = (i == nr_dst - 1);
+		bool first = !i, last = sg_is_last(sg);
 		u32 len = sg_dma_len(sg);
 
 		rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -483,16 +495,10 @@ cdesc_rollback:
 	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src,
-			     sg_nents_for_len(src, totlen),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src,
-			     sg_nents_for_len(src, totlen),
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst,
-			     sg_nents_for_len(dst, totlen),
-			     DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
 	}
 
 	return ret;
@@ -501,6 +507,7 @@ cdesc_rollback:
 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 				      int ring,
 				      struct crypto_async_request *base,
+				      struct safexcel_cipher_req *sreq,
 				      bool *should_complete, int *ret)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	*ret = 0;
 
-	do {
+	if (unlikely(!sreq->rdescs))
+		return 0;
+
+	while (sreq->rdescs--) {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
 			dev_err(priv->dev,
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 		*ret = safexcel_rdesc_check_errors(priv, rdesc);
 
 		ndesc++;
-	} while (!rdesc->last_seg);
+	}
 
 	safexcel_complete(priv, ring);
 
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
 	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
 	int err;
 
 	if (sreq->needs_inv) {
 		sreq->needs_inv = false;
-		err = safexcel_handle_inv_result(priv, ring, async,
+		err = safexcel_handle_inv_result(priv, ring, async, sreq,
 						 should_complete, ret);
 	} else {
 		err = safexcel_handle_req_result(priv, ring, async, req->src,
 						 req->dst, req->cryptlen, sreq,
 						 should_complete, ret);
+
+		if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+			u32 block_sz = 0;
+
+			switch (ctx->alg) {
+			case SAFEXCEL_DES:
+				block_sz = DES_BLOCK_SIZE;
+				break;
+			case SAFEXCEL_3DES:
+				block_sz = DES3_EDE_BLOCK_SIZE;
+				break;
+			case SAFEXCEL_AES:
+				block_sz = AES_BLOCK_SIZE;
+				break;
+			}
+
+			memcpy(req->iv, ctx->base.ctxr->data, block_sz);
+		}
 	}
 
 	return err;
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
 
 	if (sreq->needs_inv) {
 		sreq->needs_inv = false;
-		err = safexcel_handle_inv_result(priv, ring, async,
+		err = safexcel_handle_inv_result(priv, ring, async, sreq,
 						 should_complete, ret);
 	} else {
 		err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
 	ret = safexcel_send_req(async, ring, sreq, req->src,
 				req->dst, req->cryptlen, 0, 0, req->iv,
 				commands, results);
+
+	sreq->rdescs = *results;
 	return ret;
 }
 
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
 				req->cryptlen, req->assoclen,
 				crypto_aead_authsize(tfm), req->iv,
 				commands, results);
+	sreq->rdescs = *results;
 	return ret;
 }
 
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ac9282c1a5ec..a80a5e757b1f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,19 +41,21 @@ struct safexcel_ahash_req {
 	u64 len[2];
 	u64 processed[2];
 
-	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
 	dma_addr_t cache_dma;
 	unsigned int cache_sz;
 
-	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
 };
 
 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
 {
-	if (req->len[1] > req->processed[1])
-		return 0xffffffff - (req->len[0] - req->processed[0]);
+	u64 len, processed;
 
-	return req->len[0] - req->processed[0];
+	len = (0xffffffff * req->len[1]) + req->len[0];
+	processed = (0xffffffff * req->processed[1]) + req->processed[0];
+
+	return len - processed;
 }
 
 static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
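
A standalone restatement of the fixed length math, for checking by hand: len[1] and processed[1] count wraps of the 32-bit low words (see the carry handling in safexcel_ahash_update() below), and both totals are now combined before subtracting instead of special-casing a single wrap as the old code did. The 0xffffffff multiplier is kept exactly as in the hunk:

    #include <stdint.h>
    #include <stdio.h>

    struct lengths {
            uint64_t len[2];
            uint64_t processed[2];
    };

    static uint64_t queued_len(const struct lengths *r)
    {
            uint64_t len = (0xffffffffULL * r->len[1]) + r->len[0];
            uint64_t processed =
                    (0xffffffffULL * r->processed[1]) + r->processed[0];

            return len - processed;
    }

    int main(void)
    {
            /* The accepted length has wrapped once, the processed length
             * has not; the old code computed this case from the low
             * words alone.
             */
            struct lengths r = {
                    .len       = { 0x10, 1 },
                    .processed = { 0xfffffff0, 0 },
            };

            printf("queued = %llu bytes\n",
                   (unsigned long long)queued_len(&r));
            return 0;
    }
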
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	cdesc->control_data.control0 |= ctx->alg;
 	cdesc->control_data.control0 |= req->digest;
 
+	if (!req->finish)
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
 	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
 		if (req->processed[0] || req->processed[1]) {
 			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 		cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
 	}
 
-	if (!req->finish)
-		cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
 	/*
 	 * Copy the input digest if needed, and setup the context
 	 * fields. Do this now as we need it to setup the first command
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
 				 DMA_TO_DEVICE);
 		sreq->cache_dma = 0;
+		sreq->cache_sz = 0;
 	}
 
 	if (sreq->finish)
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc;
 	struct scatterlist *sg;
-	int i, extra, n_cdesc = 0, ret = 0;
-	u64 queued, len, cache_len;
+	int i, extra = 0, n_cdesc = 0, ret = 0;
+	u64 queued, len, cache_len, cache_max;
+
+	cache_max = crypto_ahash_blocksize(ahash);
+	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+		cache_max <<= 1;
 
 	queued = len = safexcel_queued_len(req);
-	if (queued <= crypto_ahash_blocksize(ahash))
+	if (queued <= cache_max)
 		cache_len = queued;
 	else
 		cache_len = queued - areq->nbytes;
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 		 * fit into full blocks, cache it for the next send() call.
 		 */
 		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+
+		if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
+		    extra < crypto_ahash_blocksize(ahash))
+			extra += crypto_ahash_blocksize(ahash);
+
+		/* If this is not the last request and the queued data
+		 * is a multiple of a block, cache the last one for now.
+		 */
 		if (!extra)
-			/* If this is not the last request and the queued data
-			 * is a multiple of a block, cache the last one for now.
-			 */
 			extra = crypto_ahash_blocksize(ahash);
 
-		if (extra) {
-			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
-					   req->cache_next, extra,
-					   areq->nbytes - extra);
-
-			queued -= extra;
-			len -= extra;
+		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+				   req->cache_next, extra,
+				   areq->nbytes - extra);
 
-			if (!queued) {
-				*commands = 0;
-				*results = 0;
-				return 0;
-			}
-		}
+		queued -= extra;
+		len -= extra;
 	}
 
 	/* Add a command descriptor for the cached data, if any */
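
The new branch holds back a full extra block for HMAC requests on top of any partial block, which is also why the request caches and the export-state cache grew to SHA512_BLOCK_SIZE << 1 elsewhere in this series. A standalone restatement of the hold-back computation, assuming a 64-byte block size (e.g. SHA-256) and a non-final request:

    #include <stdint.h>
    #include <stdio.h>

    /* How many trailing bytes get copied into cache_next instead of
     * being sent, per the hunk above.
     */
    static uint64_t bytes_to_cache(uint64_t queued, uint32_t blocksize,
                                   int is_hmac)
    {
            uint64_t extra = queued & (blocksize - 1); /* partial block */

            if (is_hmac && extra < blocksize)
                    extra += blocksize; /* hold back one full block too */

            if (!extra)
                    extra = blocksize; /* exact multiple: keep the last block */

            return extra;
    }

    int main(void)
    {
            /* 192 bytes of HMAC-SHA256 data: one full 64-byte block is
             * held back. Worst case (full block plus a partial one) is
             * just under two blocks, hence the doubled caches.
             */
            printf("%llu\n", (unsigned long long)bytes_to_cache(192, 64, 1));
            return 0;
    }
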
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	}
 
 	/* Now handle the current ahash request buffer(s) */
-	req->nents = dma_map_sg(priv->dev, areq->src,
-				sg_nents_for_len(areq->src, areq->nbytes),
+	req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
 				DMA_TO_DEVICE);
 	if (!req->nents) {
 		ret = -ENOMEM;
@@ -345,6 +348,7 @@ unmap_cache:
 	if (req->cache_dma) {
 		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
 				 DMA_TO_DEVICE);
+		req->cache_dma = 0;
 		req->cache_sz = 0;
 	}
 
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(req, 0, sizeof(struct ahash_request));
+	memset(req, 0, EIP197_AHASH_REQ_SIZE);
 
 	/* create invalidation request */
 	init_completion(&result.completion);
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 /* safexcel_ahash_cache: cache data until at least one request can be sent to
  * the engine, aka. when there is at least 1 block size in the pipe.
  */
-static int safexcel_ahash_cache(struct ahash_request *areq)
+static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	u64 queued, cache_len;
 
 	/* queued: everything accepted by the driver which will be handled by
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
 	 * In case there isn't enough bytes to proceed (less than a
 	 * block size), cache the data until we have enough.
 	 */
-	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+	if (cache_len + areq->nbytes <= cache_max) {
 		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
 				   req->cache + cache_len,
 				   areq->nbytes, 0);
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	u32 cache_max;
 
 	/* If the request is 0 length, do nothing */
 	if (!areq->nbytes)
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
 	if (req->len[0] < areq->nbytes)
 		req->len[1]++;
 
-	safexcel_ahash_cache(areq);
+	cache_max = crypto_ahash_blocksize(ahash);
+	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+		cache_max <<= 1;
+
+	safexcel_ahash_cache(areq, cache_max);
 
 	/*
 	 * We're not doing partial updates when performing an hmac request.
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
 		return safexcel_ahash_enqueue(areq);
 
 	if (!req->last_req &&
-	    safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+	    safexcel_queued_len(req) > cache_max)
 		return safexcel_ahash_enqueue(areq);
 
 	return 0;
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct safexcel_ahash_export_state *export = out;
+	u32 cache_sz;
+
+	cache_sz = crypto_ahash_blocksize(ahash);
+	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+		cache_sz <<= 1;
 
 	export->len[0] = req->len[0];
 	export->len[1] = req->len[1];
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
 	export->digest = req->digest;
 
 	memcpy(export->state, req->state, req->state_sz);
-	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+	memcpy(export->cache, req->cache, cache_sz);
 
 	return 0;
 }
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	const struct safexcel_ahash_export_state *export = in;
+	u32 cache_sz;
 	int ret;
 
 	ret = crypto_ahash_init(areq);
 	if (ret)
 		return ret;
 
+	cache_sz = crypto_ahash_blocksize(ahash);
+	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+		cache_sz <<= 1;
+
 	req->len[0] = export->len[0];
 	req->len[1] = export->len[1];
 	req->processed[0] = export->processed[0];
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
 
 	req->digest = export->digest;
 
-	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+	memcpy(req->cache, export->cache, cache_sz);
 	memcpy(req->state, export->state, req->state_sz);
 
 	return 0;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index eb75fa684876..142bc3f5c45c 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
 		(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
 	cdesc->control_data.context_hi = upper_32_bits(context);
 
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
+
 	/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
 	cdesc->control_data.refresh = 2;
 