author     Antoine Tenart <antoine.tenart@bootlin.com>    2019-05-27 10:51:04 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>       2019-06-06 02:38:56 -0400
commit     082ec2d48467b61aa89783e954645ec441714c4e
tree       c156852b9c23ba343a24614f6ccc5469b6d744c3    /drivers/crypto/inside-secure
parent     57660b11d5adbb18182e9388b83b7fb214c4e1a1
crypto: inside-secure - add support for HMAC updates

This patch adds support for HMAC updates in the Inside Secure SafeXcel
crypto engine driver. Updates were supported for hash algorithms, but
were never enabled for HMAC ones. This fixes boot time test issues.

Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
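The core of the change is a per-request cache bound that is doubled for HMAC requests, letting an update hold back up to one extra block in software instead of handing everything to the engine. A minimal standalone sketch of that rule, lifted from the hunks below (the helper name example_cache_max is illustrative and not part of the patch):

/* Illustrative only: the cache bound the driver applies after this patch. */
static inline u32 example_cache_max(struct crypto_ahash *ahash, u32 digest)
{
        /* One block is enough for plain hash requests... */
        u32 cache_max = crypto_ahash_blocksize(ahash);

        /* ...HMAC requests reserve room for a second block, so a full
         * block can still be retained for the finishing operation.
         */
        if (digest == CONTEXT_CONTROL_DIGEST_HMAC)
                cache_max <<= 1;

        return cache_max;
}

The same computation appears in safexcel_ahash_send_req(), safexcel_ahash_update(), safexcel_ahash_export() and safexcel_ahash_import() below, and the cache buffers in safexcel.h and safexcel_hash.c grow accordingly (SHA512_BLOCK_SIZE << 1).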
Diffstat (limited to 'drivers/crypto/inside-secure')
 -rw-r--r--  drivers/crypto/inside-secure/safexcel.h       |  2
 -rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c  | 58
 2 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index ca6ece5607cd..e0c202f33674 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -644,7 +644,7 @@ struct safexcel_ahash_export_state {
         u32 digest;
 
         u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
-        u8 cache[SHA512_BLOCK_SIZE];
+        u8 cache[SHA512_BLOCK_SIZE << 1];
 };
 
 /*
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index a9197d2c5a48..20950744ea4e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,11 +41,11 @@ struct safexcel_ahash_req {
         u64 len[2];
         u64 processed[2];
 
-        u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+        u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
         dma_addr_t cache_dma;
         unsigned int cache_sz;
 
-        u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+        u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
 };
 
 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
@@ -89,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
         cdesc->control_data.control0 |= ctx->alg;
         cdesc->control_data.control0 |= req->digest;
 
+        if (!req->finish)
+                cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
         if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                 if (req->processed[0] || req->processed[1]) {
                         if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -107,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                         cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                 }
 
-                if (!req->finish)
-                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
                 /*
                  * Copy the input digest if needed, and setup the context
                  * fields. Do this now as we need it to setup the first command
@@ -212,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
         struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
         struct safexcel_result_desc *rdesc;
         struct scatterlist *sg;
-        int i, extra, n_cdesc = 0, ret = 0;
-        u64 queued, len, cache_len;
+        int i, extra = 0, n_cdesc = 0, ret = 0;
+        u64 queued, len, cache_len, cache_max;
+
+        cache_max = crypto_ahash_blocksize(ahash);
+        if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+                cache_max <<= 1;
 
         queued = len = safexcel_queued_len(req);
-        if (queued <= crypto_ahash_blocksize(ahash))
+        if (queued <= cache_max)
                 cache_len = queued;
         else
                 cache_len = queued - areq->nbytes;
@@ -227,6 +231,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                  */
                 extra = queued & (crypto_ahash_blocksize(ahash) - 1);
 
+                if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
+                    extra < crypto_ahash_blocksize(ahash))
+                        extra += crypto_ahash_blocksize(ahash);
+
                 /* If this is not the last request and the queued data
                  * is a multiple of a block, cache the last one for now.
                  */
@@ -239,12 +247,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
                 queued -= extra;
                 len -= extra;
-
-                if (!queued) {
-                        *commands = 0;
-                        *results = 0;
-                        return 0;
-                }
         }
 
         /* Add a command descriptor for the cached data, if any */
@@ -522,10 +524,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 /* safexcel_ahash_cache: cache data until at least one request can be sent to
  * the engine, aka. when there is at least 1 block size in the pipe.
  */
-static int safexcel_ahash_cache(struct ahash_request *areq)
+static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
 {
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
-        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
         u64 queued, cache_len;
 
         /* queued: everything accepted by the driver which will be handled by
@@ -542,7 +543,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
          * In case there isn't enough bytes to proceed (less than a
          * block size), cache the data until we have enough.
          */
-        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+        if (cache_len + areq->nbytes <= cache_max) {
                 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                    req->cache + cache_len,
                                    areq->nbytes, 0);
@@ -602,6 +603,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
 {
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+        u32 cache_max;
 
         /* If the request is 0 length, do nothing */
         if (!areq->nbytes)
@@ -611,7 +613,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
         if (req->len[0] < areq->nbytes)
                 req->len[1]++;
 
-        safexcel_ahash_cache(areq);
+        cache_max = crypto_ahash_blocksize(ahash);
+        if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+                cache_max <<= 1;
+
+        safexcel_ahash_cache(areq, cache_max);
 
         /*
          * We're not doing partial updates when performing an hmac request.
@@ -624,7 +630,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
                 return safexcel_ahash_enqueue(areq);
 
         if (!req->last_req &&
-            safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+            safexcel_queued_len(req) > cache_max)
                 return safexcel_ahash_enqueue(areq);
 
         return 0;
@@ -681,6 +687,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
         struct safexcel_ahash_export_state *export = out;
+        u32 cache_sz;
+
+        cache_sz = crypto_ahash_blocksize(ahash);
+        if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+                cache_sz <<= 1;
 
         export->len[0] = req->len[0];
         export->len[1] = req->len[1];
@@ -690,7 +701,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
         export->digest = req->digest;
 
         memcpy(export->state, req->state, req->state_sz);
-        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+        memcpy(export->cache, req->cache, cache_sz);
 
         return 0;
 }
@@ -700,12 +711,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
         const struct safexcel_ahash_export_state *export = in;
+        u32 cache_sz;
         int ret;
 
         ret = crypto_ahash_init(areq);
         if (ret)
                 return ret;
 
+        cache_sz = crypto_ahash_blocksize(ahash);
+        if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+                cache_sz <<= 1;
+
         req->len[0] = export->len[0];
         req->len[1] = export->len[1];
         req->processed[0] = export->processed[0];
@@ -713,7 +729,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
 
         req->digest = export->digest;
 
-        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+        memcpy(req->cache, export->cache, cache_sz);
         memcpy(req->state, export->state, req->state_sz);
 
         return 0;
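For context, a hedged sketch (not part of this patch) of the kind of multi-part HMAC flow that exercises the updated paths above: update, export, import, then finish, using only generic kernel crypto API calls. The function name, the two-chunk split and the trimmed error handling are illustrative; the input buffers are assumed to be DMA-able (not on the stack).

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_hmac_partial(const u8 *key, unsigned int keylen,
                                const u8 *part1, unsigned int len1,
                                const u8 *part2, unsigned int len2,
                                u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        void *state;
        int ret;

        tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_ahash_setkey(tfm, key, keylen);
        if (ret)
                goto out_tfm;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);

        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state) {
                ret = -ENOMEM;
                goto out_req;
        }

        /* First partial update, then snapshot the hash state. */
        sg_init_one(&sg, part1, len1);
        ahash_request_set_crypt(req, &sg, NULL, len1);
        ret = crypto_wait_req(crypto_ahash_init(req), &wait);
        if (ret)
                goto out_state;
        ret = crypto_wait_req(crypto_ahash_update(req), &wait);
        if (ret)
                goto out_state;
        ret = crypto_ahash_export(req, state);
        if (ret)
                goto out_state;

        /* Resume from the exported state and finish with the second chunk. */
        ret = crypto_ahash_import(req, state);
        if (ret)
                goto out_state;
        sg_init_one(&sg, part2, len2);
        ahash_request_set_crypt(req, &sg, digest, len2);
        ret = crypto_wait_req(crypto_ahash_finup(req), &wait);

out_state:
        kfree(state);
out_req:
        ahash_request_free(req);
out_tfm:
        crypto_free_ahash(tfm);
        return ret;
}

The boot-time self-tests perform a similar partial update plus export/import sequence, which is why the driver now needs the doubled HMAC cache and per-request cache bound introduced here.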