author    Gilad Ben-Yossef <gilad@benyossef.com>    2019-04-18 09:38:44 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>  2019-04-25 03:38:13 -0400
commit    6f17e00f77d8ab2a8ce1f41848181a88108ed6c7 (patch)
tree      283517d625c83643a459773d6abb5e02db4d14b8
parent    533edf9f93e84cabeae7c1acc8b3816c79f6f35a (diff)
crypto: ccree - read next IV from HW
We were computing the next IV in software instead of reading it back from the hardware, on the premise that this could be quicker given the small size of IVs, but this proved to be far more trouble and bug-ridden than expected.

Move to reading the next IV as computed by the hardware.

This fixes a number of issues where the next IV was wrong for OFB, CTS-CBC and probably most of the other ciphers as well.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
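For context, the skcipher API lets a caller split one long CBC/CTR/OFB stream across several requests and relies on the driver leaving the correct "next IV" in req->iv when each request completes; that is the contract this patch makes ccree honour by reading the value back from the engine. The sketch below is illustrative only and not part of the patch; the helper name chain_two_chunks and the chunk length are made up.

/*
 * Illustrative sketch only (not from the patch): chaining two skcipher
 * requests, which is correct only if the driver writes the next IV back
 * into the iv buffer after the first request completes.
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int chain_two_chunks(struct crypto_skcipher *tfm,
			    struct scatterlist *sg1, struct scatterlist *sg2,
			    u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/* First chunk: consumes the caller-supplied IV. */
	skcipher_request_set_crypt(req, sg1, sg1, AES_BLOCK_SIZE, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (err)
		goto out;

	/*
	 * Second chunk: reuses iv, which must now hold the IV for the
	 * block that follows the first chunk (the "next IV").
	 */
	skcipher_request_set_crypt(req, sg2, sg2, AES_BLOCK_SIZE, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	return err;
}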
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c    4
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c      179
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h        1
3 files changed, 85 insertions(+), 99 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..adef3cfa1251 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -457,7 +457,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-				 ivsize, DMA_TO_DEVICE);
+				 ivsize, DMA_BIDIRECTIONAL);
 	}
 	/* Release pool */
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +499,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		dump_byte_array("iv", (u8 *)info, ivsize);
 		req_ctx->gen_ctx.iv_dma_addr =
 			dma_map_single(dev, (void *)info,
-				       ivsize, DMA_TO_DEVICE);
+				       ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 				ivsize, info);
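A side note on the cc_buffer_mgr.c hunks above: since the engine now writes the next IV back into the same buffer it reads the IV from, the streaming DMA mapping can no longer be DMA_TO_DEVICE. The fragment below is a generic sketch of the direction semantics, not driver code; the helper names are hypothetical.

/*
 * Generic sketch of the mapping direction, not taken from the driver;
 * helper names iv_dma_map/iv_dma_unmap are made up for illustration.
 */
#include <linux/dma-mapping.h>

static int iv_dma_map(struct device *dev, u8 *iv, size_t ivsize,
		      dma_addr_t *dma)
{
	/* The device both reads the IV and writes the next IV back. */
	*dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}

static void iv_dma_unmap(struct device *dev, dma_addr_t dma, size_t ivsize)
{
	/* Unmap with the same direction so the device's write-back is
	 * visible to the CPU.
	 */
	dma_unmap_single(dev, dma, ivsize, DMA_BIDIRECTIONAL);
}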
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 4c7231d24631..15da3a35a6a1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -464,6 +464,76 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 	return 0;
 }
 
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		return S_AES_to_DOUT;
+	case S_DIN_to_DES:
+		return S_DES_to_DOUT;
+	case S_DIN_to_SM4:
+		return S_SM4_to_DOUT;
+	default:
+		return ctx_p->flow_mode;
+	}
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+				 struct cipher_req_ctx *req_ctx,
+				 unsigned int ivsize, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = cc_out_setup_mode(ctx_p);
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+	if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+		return;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_ECB:
+		break;
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		/* Read next IV */
+		hw_desc_init(&desc[*seq_size]);
+		set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
+			set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+		} else {
+			set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+		}
+		set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+		(*seq_size)++;
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
+		/* IV */
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+			      NS_BIT, 1);
+		set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
 static void cc_setup_state_desc(struct crypto_tfm *tfm,
 				struct cipher_req_ctx *req_ctx,
 				unsigned int ivsize, unsigned int nbytes,
@@ -681,12 +751,14 @@ static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
 static void cc_setup_flow_desc(struct crypto_tfm *tfm,
 			       struct cipher_req_ctx *req_ctx,
 			       struct scatterlist *dst, struct scatterlist *src,
-			       unsigned int nbytes, void *areq,
-			       struct cc_hw_desc desc[], unsigned int *seq_size)
+			       unsigned int nbytes, struct cc_hw_desc desc[],
+			       unsigned int *seq_size)
 {
 	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+	bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+			  ctx_p->cipher_mode == DRV_CIPHER_ECB);
 
 	/* Process */
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
@@ -698,8 +770,8 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
 		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
 			     nbytes, NS_BIT);
 		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
-			      nbytes, NS_BIT, (!areq ? 0 : 1));
-		if (areq)
+			      nbytes, NS_BIT, (!last_desc ? 0 : 1));
+		if (last_desc)
 			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
 
 		set_flow_mode(&desc[*seq_size], flow_mode);
@@ -716,7 +788,7 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
 			set_dout_mlli(&desc[*seq_size],
 				      ctx_p->drvdata->mlli_sram_addr,
 				      req_ctx->in_mlli_nents, NS_BIT,
-				      (!areq ? 0 : 1));
+				      (!last_desc ? 0 : 1));
 		} else {
 			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
 				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -727,9 +799,9 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
 				      (LLI_ENTRY_BYTE_SIZE *
 				       req_ctx->in_mlli_nents)),
 				      req_ctx->out_mlli_nents, NS_BIT,
-				      (!areq ? 0 : 1));
+				      (!last_desc ? 0 : 1));
 		}
-		if (areq)
+		if (last_desc)
 			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
 
 		set_flow_mode(&desc[*seq_size], flow_mode);
@@ -737,38 +809,6 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
 	}
 }
 
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
-	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
-	    IS_ALIGNED((unsigned long)ctr, 8)) {
-
-		__be64 *high_be = (__be64 *)ctr;
-		__be64 *low_be = high_be + 1;
-		u64 orig_low = __be64_to_cpu(*low_be);
-		u64 new_low = orig_low + (u64)increment;
-
-		*low_be = __cpu_to_be64(new_low);
-
-		if (new_low < orig_low)
-			*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
-	} else {
-		u8 *pos = (ctr + AES_BLOCK_SIZE);
-		u8 val;
-		unsigned int size;
-
-		for (; increment; increment--)
-			for (size = AES_BLOCK_SIZE; size; size--) {
-				val = *--pos + 1;
-				*pos = val;
-				if (val)
-					break;
-			}
-	}
-}
-
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 {
 	struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -776,44 +816,11 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 	struct scatterlist *src = req->src;
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
-	unsigned int len;
 
 	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
-	switch (ctx_p->cipher_mode) {
-	case DRV_CIPHER_CBC:
-		/*
-		 * The crypto API expects us to set the req->iv to the last
-		 * ciphertext block. For encrypt, simply copy from the result.
-		 * For decrypt, we must copy from a saved buffer since this
-		 * could be an in-place decryption operation and the src is
-		 * lost by this point.
-		 */
-		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-			memcpy(req->iv, req_ctx->backup_info, ivsize);
-			kzfree(req_ctx->backup_info);
-		} else if (!err) {
-			len = req->cryptlen - ivsize;
-			scatterwalk_map_and_copy(req->iv, req->dst, len,
-						 ivsize, 0);
-		}
-		break;
-
-	case DRV_CIPHER_CTR:
-		/* Compute the counter of the last block */
-		len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
-		cc_update_ctr((u8 *)req->iv, len);
-		break;
-
-	default:
-		break;
-	}
-
+	memcpy(req->iv, req_ctx->iv, ivsize);
 	kzfree(req_ctx->iv);
-
 	skcipher_request_complete(req, err);
 }
 
@@ -896,7 +903,9 @@ static int cc_cipher_process(struct skcipher_request *req,
 	/* Setup key */
 	cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
 	/* Data processing */
-	cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+	cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+	/* Read next IV */
+	cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
 
 	/* STAT_PHASE_3: Lock HW and push sequence */
 
@@ -911,7 +920,6 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 exit_process:
 	if (rc != -EINPROGRESS && rc != -EBUSY) {
-		kzfree(req_ctx->backup_info);
 		kzfree(req_ctx->iv);
 	}
 
@@ -929,31 +937,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
 
 static int cc_cipher_decrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
-	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
-	gfp_t flags = cc_gfp_flags(&req->base);
-	unsigned int len;
 
 	memset(req_ctx, 0, sizeof(*req_ctx));
 
-	if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
-	    (req->cryptlen >= ivsize)) {
-
-		/* Allocate and save the last IV sized bytes of the source,
-		 * which will be lost in case of in-place decryption.
-		 */
-		req_ctx->backup_info = kzalloc(ivsize, flags);
-		if (!req_ctx->backup_info)
-			return -ENOMEM;
-
-		len = req->cryptlen - ivsize;
-		scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
-					 ivsize, 0);
-	}
-
 	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 }
 
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..312d67f88414 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
 	u32 in_mlli_nents;
 	u32 out_nents;
 	u32 out_mlli_nents;
-	u8 *backup_info; /*store iv for generated IV flow*/
 	u8 *iv;
 	struct mlli_params mlli_params;
 };