author    | Gilad Ben-Yossef <gilad@benyossef.com>   | 2018-01-22 04:27:03 -0500
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2018-02-15 10:26:42 -0500
commit    | ff27e85a85bbde19589f775297db92ff925e5981 (patch)
tree      | 9ae7d4858b55898b41f48833c9e02136975b7602 /drivers/crypto/ccree/cc_buffer_mgr.c
parent    | 63893811b0fcb52f6eaf9811cc08bddd46f81c3e (diff)
crypto: ccree - add AEAD support
Add CryptoCell AEAD support
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/ccree/cc_buffer_mgr.c')
-rw-r--r-- | drivers/crypto/ccree/cc_buffer_mgr.c | 882
1 file changed, 882 insertions(+), 0 deletions(-)
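For context, the buffer-manager changes below back the standard kernel AEAD API: once cc_aead.c registers the CryptoCell transforms, a generic caller drives cc_map_aead_request()/cc_unmap_aead_request() without knowing anything about MLLI tables. The following is a minimal, illustrative sketch of such a caller, not part of this patch; the "gcm(aes)" algorithm name, the all-zero AES-128 key and IV, the buffer sizes, and the ccree_aead_demo() function name are all assumptions made for the example.

```c
/*
 * Illustrative sketch only -- not part of this patch. A minimal kernel
 * caller of the AEAD API, assuming a "gcm(aes)" transform, an all-zero
 * AES-128 key and IV, 16 bytes of plaintext, no associated data and a
 * 16-byte tag, all processed in place in a single buffer.
 */
#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int ccree_aead_demo(void)
{
	static const u8 key[16];	/* all-zero AES-128 key (assumption) */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;			/* 16B plaintext + 16B tag, then 12B IV */
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keep data and IV in DMA-able memory; drivers may map them for DMA */
	buf = kzalloc(32 + 12, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto free_buf;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto free_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_buf;
	}

	sg_init_one(&sg, buf, 32);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);			      /* no assoc. data */
	aead_request_set_crypt(req, &sg, &sg, 16, buf + 32); /* in place */

	/* wait synchronously for the (possibly asynchronous) operation */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_buf:
	kfree(buf);
free_tfm:
	crypto_free_aead(tfm);
	return err;
}
```

On hardware where ccree provides the gcm(aes) implementation, the crypto_aead_encrypt() call above is what eventually reaches cc_map_aead_request() below to map the request's scatterlists for DMA.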
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index bb306b4efa4c..b32577477b4c 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,6 +1,7 @@
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ | 2 | /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ |
3 | 3 | ||
4 | #include <crypto/internal/aead.h> | ||
4 | #include <crypto/authenc.h> | 5 | #include <crypto/authenc.h> |
5 | #include <crypto/scatterwalk.h> | 6 | #include <crypto/scatterwalk.h> |
6 | #include <linux/dmapool.h> | 7 | #include <linux/dmapool.h> |
@@ -10,6 +11,7 @@
10 | #include "cc_lli_defs.h" | 11 | #include "cc_lli_defs.h" |
11 | #include "cc_cipher.h" | 12 | #include "cc_cipher.h" |
12 | #include "cc_hash.h" | 13 | #include "cc_hash.h" |
14 | #include "cc_aead.h" | ||
13 | 15 | ||
14 | enum dma_buffer_type { | 16 | enum dma_buffer_type { |
15 | DMA_NULL_TYPE = -1, | 17 | DMA_NULL_TYPE = -1, |
@@ -52,6 +54,27 @@ static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
52 | } | 54 | } |
53 | 55 | ||
54 | /** | 56 | /** |
57 | * cc_copy_mac() - Copy MAC to temporary location | ||
58 | * | ||
59 | * @dev: device object | ||
60 | * @req: aead request object | ||
61 | * @dir: [IN] copy from/to sgl | ||
62 | */ | ||
63 | static void cc_copy_mac(struct device *dev, struct aead_request *req, | ||
64 | enum cc_sg_cpy_direct dir) | ||
65 | { | ||
66 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
67 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
68 | u32 skip = req->assoclen + req->cryptlen; | ||
69 | |||
70 | if (areq_ctx->is_gcm4543) | ||
71 | skip += crypto_aead_ivsize(tfm); | ||
72 | |||
73 | cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, | ||
74 | (skip - areq_ctx->req_authsize), skip, dir); | ||
75 | } | ||
76 | |||
77 | /** | ||
55 | * cc_get_sgl_nents() - Get scatterlist number of entries. | 78 | * cc_get_sgl_nents() - Get scatterlist number of entries. |
56 | * | 79 | * |
57 | * @sg_list: SG list | 80 | * @sg_list: SG list |
@@ -246,6 +269,27 @@ build_mlli_exit:
246 | return rc; | 269 | return rc; |
247 | } | 270 | } |
248 | 271 | ||
272 | static void cc_add_buffer_entry(struct device *dev, | ||
273 | struct buffer_array *sgl_data, | ||
274 | dma_addr_t buffer_dma, unsigned int buffer_len, | ||
275 | bool is_last_entry, u32 *mlli_nents) | ||
276 | { | ||
277 | unsigned int index = sgl_data->num_of_buffers; | ||
278 | |||
279 | dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n", | ||
280 | index, &buffer_dma, buffer_len, is_last_entry); | ||
281 | sgl_data->nents[index] = 1; | ||
282 | sgl_data->entry[index].buffer_dma = buffer_dma; | ||
283 | sgl_data->offset[index] = 0; | ||
284 | sgl_data->total_data_len[index] = buffer_len; | ||
285 | sgl_data->type[index] = DMA_BUFF_TYPE; | ||
286 | sgl_data->is_last[index] = is_last_entry; | ||
287 | sgl_data->mlli_nents[index] = mlli_nents; | ||
288 | if (sgl_data->mlli_nents[index]) | ||
289 | *sgl_data->mlli_nents[index] = 0; | ||
290 | sgl_data->num_of_buffers++; | ||
291 | } | ||
292 | |||
249 | static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, | 293 | static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, |
250 | unsigned int nents, struct scatterlist *sgl, | 294 | unsigned int nents, struct scatterlist *sgl, |
251 | unsigned int data_len, unsigned int data_offset, | 295 | unsigned int data_len, unsigned int data_offset, |
@@ -349,6 +393,33 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
349 | return 0; | 393 | return 0; |
350 | } | 394 | } |
351 | 395 | ||
396 | static int | ||
397 | cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx, | ||
398 | u8 *config_data, struct buffer_array *sg_data, | ||
399 | unsigned int assoclen) | ||
400 | { | ||
401 | dev_dbg(dev, " handle additional data config set to DLLI\n"); | ||
402 | /* create sg for the current buffer */ | ||
403 | sg_init_one(&areq_ctx->ccm_adata_sg, config_data, | ||
404 | AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size); | ||
405 | if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) { | ||
406 | dev_err(dev, "dma_map_sg() config buffer failed\n"); | ||
407 | return -ENOMEM; | ||
408 | } | ||
409 | dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", | ||
410 | &sg_dma_address(&areq_ctx->ccm_adata_sg), | ||
411 | sg_page(&areq_ctx->ccm_adata_sg), | ||
412 | sg_virt(&areq_ctx->ccm_adata_sg), | ||
413 | areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length); | ||
414 | /* prepare for case of MLLI */ | ||
415 | if (assoclen > 0) { | ||
416 | cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg, | ||
417 | (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size), | ||
418 | 0, false, NULL); | ||
419 | } | ||
420 | return 0; | ||
421 | } | ||
422 | |||
352 | static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx, | 423 | static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx, |
353 | u8 *curr_buff, u32 curr_buff_cnt, | 424 | u8 *curr_buff, u32 curr_buff_cnt, |
354 | struct buffer_array *sg_data) | 425 | struct buffer_array *sg_data) |
@@ -497,6 +568,817 @@ cipher_exit:
497 | return rc; | 568 | return rc; |
498 | } | 569 | } |
499 | 570 | ||
571 | void cc_unmap_aead_request(struct device *dev, struct aead_request *req) | ||
572 | { | ||
573 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
574 | unsigned int hw_iv_size = areq_ctx->hw_iv_size; | ||
575 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
576 | struct cc_drvdata *drvdata = dev_get_drvdata(dev); | ||
577 | u32 dummy; | ||
578 | bool chained; | ||
579 | u32 size_to_unmap = 0; | ||
580 | |||
581 | if (areq_ctx->mac_buf_dma_addr) { | ||
582 | dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, | ||
583 | MAX_MAC_SIZE, DMA_BIDIRECTIONAL); | ||
584 | } | ||
585 | |||
586 | if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { | ||
587 | if (areq_ctx->hkey_dma_addr) { | ||
588 | dma_unmap_single(dev, areq_ctx->hkey_dma_addr, | ||
589 | AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); | ||
590 | } | ||
591 | |||
592 | if (areq_ctx->gcm_block_len_dma_addr) { | ||
593 | dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr, | ||
594 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
595 | } | ||
596 | |||
597 | if (areq_ctx->gcm_iv_inc1_dma_addr) { | ||
598 | dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, | ||
599 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
600 | } | ||
601 | |||
602 | if (areq_ctx->gcm_iv_inc2_dma_addr) { | ||
603 | dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, | ||
604 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
605 | } | ||
606 | } | ||
607 | |||
608 | if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { | ||
609 | if (areq_ctx->ccm_iv0_dma_addr) { | ||
610 | dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, | ||
611 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
612 | } | ||
613 | |||
614 | dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); | ||
615 | } | ||
616 | if (areq_ctx->gen_ctx.iv_dma_addr) { | ||
617 | dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, | ||
618 | hw_iv_size, DMA_BIDIRECTIONAL); | ||
619 | } | ||
620 | |||
621 | /*In case a pool was set, a table was | ||
622 | *allocated and should be released | ||
623 | */ | ||
624 | if (areq_ctx->mlli_params.curr_pool) { | ||
625 | dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", | ||
626 | &areq_ctx->mlli_params.mlli_dma_addr, | ||
627 | areq_ctx->mlli_params.mlli_virt_addr); | ||
628 | dma_pool_free(areq_ctx->mlli_params.curr_pool, | ||
629 | areq_ctx->mlli_params.mlli_virt_addr, | ||
630 | areq_ctx->mlli_params.mlli_dma_addr); | ||
631 | } | ||
632 | |||
633 | dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", | ||
634 | sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, | ||
635 | req->assoclen, req->cryptlen); | ||
636 | size_to_unmap = req->assoclen + req->cryptlen; | ||
637 | if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) | ||
638 | size_to_unmap += areq_ctx->req_authsize; | ||
639 | if (areq_ctx->is_gcm4543) | ||
640 | size_to_unmap += crypto_aead_ivsize(tfm); | ||
641 | |||
642 | dma_unmap_sg(dev, req->src, | ||
643 | cc_get_sgl_nents(dev, req->src, size_to_unmap, | ||
644 | &dummy, &chained), | ||
645 | DMA_BIDIRECTIONAL); | ||
646 | if (req->src != req->dst) { | ||
647 | dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", | ||
648 | sg_virt(req->dst)); | ||
649 | dma_unmap_sg(dev, req->dst, | ||
650 | cc_get_sgl_nents(dev, req->dst, size_to_unmap, | ||
651 | &dummy, &chained), | ||
652 | DMA_BIDIRECTIONAL); | ||
653 | } | ||
654 | if (drvdata->coherent && | ||
655 | areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && | ||
656 | req->src == req->dst) { | ||
657 | /* copy the MAC back from its temporary location to deal with | ||
658 | * possible data overwrite caused by a cache coherency | ||
659 | * problem. | ||
660 | */ | ||
661 | cc_copy_mac(dev, req, CC_SG_FROM_BUF); | ||
662 | } | ||
663 | } | ||
664 | |||
665 | static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl, | ||
666 | unsigned int sgl_nents, unsigned int authsize, | ||
667 | u32 last_entry_data_size, | ||
668 | bool *is_icv_fragmented) | ||
669 | { | ||
670 | unsigned int icv_max_size = 0; | ||
671 | unsigned int icv_required_size = authsize > last_entry_data_size ? | ||
672 | (authsize - last_entry_data_size) : | ||
673 | authsize; | ||
674 | unsigned int nents; | ||
675 | unsigned int i; | ||
676 | |||
677 | if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) { | ||
678 | *is_icv_fragmented = false; | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) { | ||
683 | if (!sgl) | ||
684 | break; | ||
685 | sgl = sg_next(sgl); | ||
686 | } | ||
687 | |||
688 | if (sgl) | ||
689 | icv_max_size = sgl->length; | ||
690 | |||
691 | if (last_entry_data_size > authsize) { | ||
692 | /* ICV attached to data in last entry (not fragmented!) */ | ||
693 | nents = 0; | ||
694 | *is_icv_fragmented = false; | ||
695 | } else if (last_entry_data_size == authsize) { | ||
696 | /* ICV placed in whole last entry (not fragmented!) */ | ||
697 | nents = 1; | ||
698 | *is_icv_fragmented = false; | ||
699 | } else if (icv_max_size > icv_required_size) { | ||
700 | nents = 1; | ||
701 | *is_icv_fragmented = true; | ||
702 | } else if (icv_max_size == icv_required_size) { | ||
703 | nents = 2; | ||
704 | *is_icv_fragmented = true; | ||
705 | } else { | ||
706 | dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n", | ||
707 | MAX_ICV_NENTS_SUPPORTED); | ||
708 | nents = -1; /*unsupported*/ | ||
709 | } | ||
710 | dev_dbg(dev, "is_frag=%s icv_nents=%u\n", | ||
711 | (*is_icv_fragmented ? "true" : "false"), nents); | ||
712 | |||
713 | return nents; | ||
714 | } | ||
715 | |||
716 | static int cc_aead_chain_iv(struct cc_drvdata *drvdata, | ||
717 | struct aead_request *req, | ||
718 | struct buffer_array *sg_data, | ||
719 | bool is_last, bool do_chain) | ||
720 | { | ||
721 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
722 | unsigned int hw_iv_size = areq_ctx->hw_iv_size; | ||
723 | struct device *dev = drvdata_to_dev(drvdata); | ||
724 | int rc = 0; | ||
725 | |||
726 | if (!req->iv) { | ||
727 | areq_ctx->gen_ctx.iv_dma_addr = 0; | ||
728 | goto chain_iv_exit; | ||
729 | } | ||
730 | |||
731 | areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, | ||
732 | hw_iv_size, | ||
733 | DMA_BIDIRECTIONAL); | ||
734 | if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { | ||
735 | dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", | ||
736 | hw_iv_size, req->iv); | ||
737 | rc = -ENOMEM; | ||
738 | goto chain_iv_exit; | ||
739 | } | ||
740 | |||
741 | dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", | ||
742 | hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); | ||
743 | // TODO: what about CTR?? ask Ron | ||
744 | if (do_chain && areq_ctx->plaintext_authenticate_only) { | ||
745 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
746 | unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm); | ||
747 | unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET; | ||
748 | /* Chain to given list */ | ||
749 | cc_add_buffer_entry(dev, sg_data, | ||
750 | (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs), | ||
751 | iv_size_to_authenc, is_last, | ||
752 | &areq_ctx->assoc.mlli_nents); | ||
753 | areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; | ||
754 | } | ||
755 | |||
756 | chain_iv_exit: | ||
757 | return rc; | ||
758 | } | ||
759 | |||
760 | static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, | ||
761 | struct aead_request *req, | ||
762 | struct buffer_array *sg_data, | ||
763 | bool is_last, bool do_chain) | ||
764 | { | ||
765 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
766 | int rc = 0; | ||
767 | u32 mapped_nents = 0; | ||
768 | struct scatterlist *current_sg = req->src; | ||
769 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
770 | unsigned int sg_index = 0; | ||
771 | u32 size_of_assoc = req->assoclen; | ||
772 | struct device *dev = drvdata_to_dev(drvdata); | ||
773 | |||
774 | if (areq_ctx->is_gcm4543) | ||
775 | size_of_assoc += crypto_aead_ivsize(tfm); | ||
776 | |||
777 | if (!sg_data) { | ||
778 | rc = -EINVAL; | ||
779 | goto chain_assoc_exit; | ||
780 | } | ||
781 | |||
782 | if (req->assoclen == 0) { | ||
783 | areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; | ||
784 | areq_ctx->assoc.nents = 0; | ||
785 | areq_ctx->assoc.mlli_nents = 0; | ||
786 | dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n", | ||
787 | cc_dma_buf_type(areq_ctx->assoc_buff_type), | ||
788 | areq_ctx->assoc.nents); | ||
789 | goto chain_assoc_exit; | ||
790 | } | ||
791 | |||
792 | //iterate over the sgl to see how many entries are for associated data | ||
793 | //it is assumed that if we reach here, the sgl is already mapped | ||
794 | sg_index = current_sg->length; | ||
795 | //the first entry in the scatter list contains all the associated data | ||
796 | if (sg_index > size_of_assoc) { | ||
797 | mapped_nents++; | ||
798 | } else { | ||
799 | while (sg_index <= size_of_assoc) { | ||
800 | current_sg = sg_next(current_sg); | ||
801 | /* if we have reached the end of the sgl, then this is | ||
802 | * unexpected | ||
803 | */ | ||
804 | if (!current_sg) { | ||
805 | dev_err(dev, "reached end of sg list. unexpected\n"); | ||
806 | return -EINVAL; | ||
807 | } | ||
808 | sg_index += current_sg->length; | ||
809 | mapped_nents++; | ||
810 | } | ||
811 | } | ||
812 | if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { | ||
813 | dev_err(dev, "Too many fragments. current %d max %d\n", | ||
814 | mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); | ||
815 | return -ENOMEM; | ||
816 | } | ||
817 | areq_ctx->assoc.nents = mapped_nents; | ||
818 | |||
819 | /* in the CCM case we have an additional entry for | ||
820 | * the CCM header configuration | ||
821 | */ | ||
822 | if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { | ||
823 | if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { | ||
824 | dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n", | ||
825 | (areq_ctx->assoc.nents + 1), | ||
826 | LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); | ||
827 | rc = -ENOMEM; | ||
828 | goto chain_assoc_exit; | ||
829 | } | ||
830 | } | ||
831 | |||
832 | if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null) | ||
833 | areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI; | ||
834 | else | ||
835 | areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; | ||
836 | |||
837 | if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { | ||
838 | dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n", | ||
839 | cc_dma_buf_type(areq_ctx->assoc_buff_type), | ||
840 | areq_ctx->assoc.nents); | ||
841 | cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, | ||
842 | req->assoclen, 0, is_last, | ||
843 | &areq_ctx->assoc.mlli_nents); | ||
844 | areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; | ||
845 | } | ||
846 | |||
847 | chain_assoc_exit: | ||
848 | return rc; | ||
849 | } | ||
850 | |||
851 | static void cc_prepare_aead_data_dlli(struct aead_request *req, | ||
852 | u32 *src_last_bytes, u32 *dst_last_bytes) | ||
853 | { | ||
854 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
855 | enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; | ||
856 | unsigned int authsize = areq_ctx->req_authsize; | ||
857 | |||
858 | areq_ctx->is_icv_fragmented = false; | ||
859 | if (req->src == req->dst) { | ||
860 | /*INPLACE*/ | ||
861 | areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) + | ||
862 | (*src_last_bytes - authsize); | ||
863 | areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) + | ||
864 | (*src_last_bytes - authsize); | ||
865 | } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { | ||
866 | /*NON-INPLACE and DECRYPT*/ | ||
867 | areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) + | ||
868 | (*src_last_bytes - authsize); | ||
869 | areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) + | ||
870 | (*src_last_bytes - authsize); | ||
871 | } else { | ||
872 | /*NON-INPLACE and ENCRYPT*/ | ||
873 | areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) + | ||
874 | (*dst_last_bytes - authsize); | ||
875 | areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) + | ||
876 | (*dst_last_bytes - authsize); | ||
877 | } | ||
878 | } | ||
879 | |||
880 | static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata, | ||
881 | struct aead_request *req, | ||
882 | struct buffer_array *sg_data, | ||
883 | u32 *src_last_bytes, u32 *dst_last_bytes, | ||
884 | bool is_last_table) | ||
885 | { | ||
886 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
887 | enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; | ||
888 | unsigned int authsize = areq_ctx->req_authsize; | ||
889 | int rc = 0, icv_nents; | ||
890 | struct device *dev = drvdata_to_dev(drvdata); | ||
891 | struct scatterlist *sg; | ||
892 | |||
893 | if (req->src == req->dst) { | ||
894 | /*INPLACE*/ | ||
895 | cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, | ||
896 | areq_ctx->src_sgl, areq_ctx->cryptlen, | ||
897 | areq_ctx->src_offset, is_last_table, | ||
898 | &areq_ctx->src.mlli_nents); | ||
899 | |||
900 | icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl, | ||
901 | areq_ctx->src.nents, | ||
902 | authsize, *src_last_bytes, | ||
903 | &areq_ctx->is_icv_fragmented); | ||
904 | if (icv_nents < 0) { | ||
905 | rc = -ENOTSUPP; | ||
906 | goto prepare_data_mlli_exit; | ||
907 | } | ||
908 | |||
909 | if (areq_ctx->is_icv_fragmented) { | ||
910 | /* Backup happens only when ICV is fragmented, ICV | ||
911 | * verification is made by CPU compare in order to | ||
912 | * simplify MAC verification upon request completion | ||
913 | */ | ||
914 | if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { | ||
915 | /* On coherent platforms (e.g. ACP) the ICV | ||
916 | * has already been copied for any | ||
917 | * INPLACE-DECRYPT operation, hence | ||
918 | * we must skip this copy here. | ||
919 | */ | ||
920 | if (!drvdata->coherent) | ||
921 | cc_copy_mac(dev, req, CC_SG_TO_BUF); | ||
922 | |||
923 | areq_ctx->icv_virt_addr = areq_ctx->backup_mac; | ||
924 | } else { | ||
925 | areq_ctx->icv_virt_addr = areq_ctx->mac_buf; | ||
926 | areq_ctx->icv_dma_addr = | ||
927 | areq_ctx->mac_buf_dma_addr; | ||
928 | } | ||
929 | } else { /* Contig. ICV */ | ||
930 | sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; | ||
931 | /* Should handle the case where the sg is not contiguous. */ | ||
932 | areq_ctx->icv_dma_addr = sg_dma_address(sg) + | ||
933 | (*src_last_bytes - authsize); | ||
934 | areq_ctx->icv_virt_addr = sg_virt(sg) + | ||
935 | (*src_last_bytes - authsize); | ||
936 | } | ||
937 | |||
938 | } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { | ||
939 | /*NON-INPLACE and DECRYPT*/ | ||
940 | cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, | ||
941 | areq_ctx->src_sgl, areq_ctx->cryptlen, | ||
942 | areq_ctx->src_offset, is_last_table, | ||
943 | &areq_ctx->src.mlli_nents); | ||
944 | cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, | ||
945 | areq_ctx->dst_sgl, areq_ctx->cryptlen, | ||
946 | areq_ctx->dst_offset, is_last_table, | ||
947 | &areq_ctx->dst.mlli_nents); | ||
948 | |||
949 | icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl, | ||
950 | areq_ctx->src.nents, | ||
951 | authsize, *src_last_bytes, | ||
952 | &areq_ctx->is_icv_fragmented); | ||
953 | if (icv_nents < 0) { | ||
954 | rc = -ENOTSUPP; | ||
955 | goto prepare_data_mlli_exit; | ||
956 | } | ||
957 | |||
958 | /* Backup happens only when ICV is fragmented, ICV | ||
959 | * verification is made by CPU compare in order to simplify | ||
960 | * MAC verification upon request completion | ||
961 | */ | ||
962 | if (areq_ctx->is_icv_fragmented) { | ||
963 | cc_copy_mac(dev, req, CC_SG_TO_BUF); | ||
964 | areq_ctx->icv_virt_addr = areq_ctx->backup_mac; | ||
965 | |||
966 | } else { /* Contig. ICV */ | ||
967 | sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; | ||
968 | /* Should handle the case where the sg is not contiguous. */ | ||
969 | areq_ctx->icv_dma_addr = sg_dma_address(sg) + | ||
970 | (*src_last_bytes - authsize); | ||
971 | areq_ctx->icv_virt_addr = sg_virt(sg) + | ||
972 | (*src_last_bytes - authsize); | ||
973 | } | ||
974 | |||
975 | } else { | ||
976 | /*NON-INPLACE and ENCRYPT*/ | ||
977 | cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, | ||
978 | areq_ctx->dst_sgl, areq_ctx->cryptlen, | ||
979 | areq_ctx->dst_offset, is_last_table, | ||
980 | &areq_ctx->dst.mlli_nents); | ||
981 | cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, | ||
982 | areq_ctx->src_sgl, areq_ctx->cryptlen, | ||
983 | areq_ctx->src_offset, is_last_table, | ||
984 | &areq_ctx->src.mlli_nents); | ||
985 | |||
986 | icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl, | ||
987 | areq_ctx->dst.nents, | ||
988 | authsize, *dst_last_bytes, | ||
989 | &areq_ctx->is_icv_fragmented); | ||
990 | if (icv_nents < 0) { | ||
991 | rc = -ENOTSUPP; | ||
992 | goto prepare_data_mlli_exit; | ||
993 | } | ||
994 | |||
995 | if (!areq_ctx->is_icv_fragmented) { | ||
996 | sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]; | ||
997 | /* Contig. ICV */ | ||
998 | areq_ctx->icv_dma_addr = sg_dma_address(sg) + | ||
999 | (*dst_last_bytes - authsize); | ||
1000 | areq_ctx->icv_virt_addr = sg_virt(sg) + | ||
1001 | (*dst_last_bytes - authsize); | ||
1002 | } else { | ||
1003 | areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; | ||
1004 | areq_ctx->icv_virt_addr = areq_ctx->mac_buf; | ||
1005 | } | ||
1006 | } | ||
1007 | |||
1008 | prepare_data_mlli_exit: | ||
1009 | return rc; | ||
1010 | } | ||
1011 | |||
1012 | static int cc_aead_chain_data(struct cc_drvdata *drvdata, | ||
1013 | struct aead_request *req, | ||
1014 | struct buffer_array *sg_data, | ||
1015 | bool is_last_table, bool do_chain) | ||
1016 | { | ||
1017 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
1018 | struct device *dev = drvdata_to_dev(drvdata); | ||
1019 | enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; | ||
1020 | unsigned int authsize = areq_ctx->req_authsize; | ||
1021 | unsigned int src_last_bytes = 0, dst_last_bytes = 0; | ||
1022 | int rc = 0; | ||
1023 | u32 src_mapped_nents = 0, dst_mapped_nents = 0; | ||
1024 | u32 offset = 0; | ||
1025 | /* non-inplace mode */ | ||
1026 | unsigned int size_for_map = req->assoclen + req->cryptlen; | ||
1027 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1028 | u32 sg_index = 0; | ||
1029 | bool chained = false; | ||
1030 | bool is_gcm4543 = areq_ctx->is_gcm4543; | ||
1031 | u32 size_to_skip = req->assoclen; | ||
1032 | |||
1033 | if (is_gcm4543) | ||
1034 | size_to_skip += crypto_aead_ivsize(tfm); | ||
1035 | |||
1036 | offset = size_to_skip; | ||
1037 | |||
1038 | if (!sg_data) | ||
1039 | return -EINVAL; | ||
1040 | |||
1041 | areq_ctx->src_sgl = req->src; | ||
1042 | areq_ctx->dst_sgl = req->dst; | ||
1043 | |||
1044 | if (is_gcm4543) | ||
1045 | size_for_map += crypto_aead_ivsize(tfm); | ||
1046 | |||
1047 | size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? | ||
1048 | authsize : 0; | ||
1049 | src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, | ||
1050 | &src_last_bytes, &chained); | ||
1051 | sg_index = areq_ctx->src_sgl->length; | ||
1052 | //check where the data starts | ||
1053 | while (sg_index <= size_to_skip) { | ||
1054 | offset -= areq_ctx->src_sgl->length; | ||
1055 | areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl); | ||
1056 | //if we have reached the end of the sgl, then this is unexpected | ||
1057 | if (!areq_ctx->src_sgl) { | ||
1058 | dev_err(dev, "reached end of sg list. unexpected\n"); | ||
1059 | return -EINVAL; | ||
1060 | } | ||
1061 | sg_index += areq_ctx->src_sgl->length; | ||
1062 | src_mapped_nents--; | ||
1063 | } | ||
1064 | if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { | ||
1065 | dev_err(dev, "Too many fragments. current %d max %d\n", | ||
1066 | src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); | ||
1067 | return -ENOMEM; | ||
1068 | } | ||
1069 | |||
1070 | areq_ctx->src.nents = src_mapped_nents; | ||
1071 | |||
1072 | areq_ctx->src_offset = offset; | ||
1073 | |||
1074 | if (req->src != req->dst) { | ||
1075 | size_for_map = req->assoclen + req->cryptlen; | ||
1076 | size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? | ||
1077 | authsize : 0; | ||
1078 | if (is_gcm4543) | ||
1079 | size_for_map += crypto_aead_ivsize(tfm); | ||
1080 | |||
1081 | rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, | ||
1082 | &areq_ctx->dst.nents, | ||
1083 | LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, | ||
1084 | &dst_mapped_nents); | ||
1085 | if (rc) { | ||
1086 | rc = -ENOMEM; | ||
1087 | goto chain_data_exit; | ||
1088 | } | ||
1089 | } | ||
1090 | |||
1091 | dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, | ||
1092 | &dst_last_bytes, &chained); | ||
1093 | sg_index = areq_ctx->dst_sgl->length; | ||
1094 | offset = size_to_skip; | ||
1095 | |||
1096 | //check where the data starts | ||
1097 | while (sg_index <= size_to_skip) { | ||
1098 | offset -= areq_ctx->dst_sgl->length; | ||
1099 | areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl); | ||
1100 | //if we have reached the end of the sgl, then this is unexpected | ||
1101 | if (!areq_ctx->dst_sgl) { | ||
1102 | dev_err(dev, "reached end of sg list. unexpected\n"); | ||
1103 | return -EINVAL; | ||
1104 | } | ||
1105 | sg_index += areq_ctx->dst_sgl->length; | ||
1106 | dst_mapped_nents--; | ||
1107 | } | ||
1108 | if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { | ||
1109 | dev_err(dev, "Too many fragments. current %d max %d\n", | ||
1110 | dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); | ||
1111 | return -ENOMEM; | ||
1112 | } | ||
1113 | areq_ctx->dst.nents = dst_mapped_nents; | ||
1114 | areq_ctx->dst_offset = offset; | ||
1115 | if (src_mapped_nents > 1 || | ||
1116 | dst_mapped_nents > 1 || | ||
1117 | do_chain) { | ||
1118 | areq_ctx->data_buff_type = CC_DMA_BUF_MLLI; | ||
1119 | rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data, | ||
1120 | &src_last_bytes, | ||
1121 | &dst_last_bytes, is_last_table); | ||
1122 | } else { | ||
1123 | areq_ctx->data_buff_type = CC_DMA_BUF_DLLI; | ||
1124 | cc_prepare_aead_data_dlli(req, &src_last_bytes, | ||
1125 | &dst_last_bytes); | ||
1126 | } | ||
1127 | |||
1128 | chain_data_exit: | ||
1129 | return rc; | ||
1130 | } | ||
1131 | |||
1132 | static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata, | ||
1133 | struct aead_request *req) | ||
1134 | { | ||
1135 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
1136 | u32 curr_mlli_size = 0; | ||
1137 | |||
1138 | if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { | ||
1139 | areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr; | ||
1140 | curr_mlli_size = areq_ctx->assoc.mlli_nents * | ||
1141 | LLI_ENTRY_BYTE_SIZE; | ||
1142 | } | ||
1143 | |||
1144 | if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { | ||
1145 | /*Inplace case dst nents equal to src nents*/ | ||
1146 | if (req->src == req->dst) { | ||
1147 | areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents; | ||
1148 | areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + | ||
1149 | curr_mlli_size; | ||
1150 | areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr; | ||
1151 | if (!areq_ctx->is_single_pass) | ||
1152 | areq_ctx->assoc.mlli_nents += | ||
1153 | areq_ctx->src.mlli_nents; | ||
1154 | } else { | ||
1155 | if (areq_ctx->gen_ctx.op_type == | ||
1156 | DRV_CRYPTO_DIRECTION_DECRYPT) { | ||
1157 | areq_ctx->src.sram_addr = | ||
1158 | drvdata->mlli_sram_addr + | ||
1159 | curr_mlli_size; | ||
1160 | areq_ctx->dst.sram_addr = | ||
1161 | areq_ctx->src.sram_addr + | ||
1162 | areq_ctx->src.mlli_nents * | ||
1163 | LLI_ENTRY_BYTE_SIZE; | ||
1164 | if (!areq_ctx->is_single_pass) | ||
1165 | areq_ctx->assoc.mlli_nents += | ||
1166 | areq_ctx->src.mlli_nents; | ||
1167 | } else { | ||
1168 | areq_ctx->dst.sram_addr = | ||
1169 | drvdata->mlli_sram_addr + | ||
1170 | curr_mlli_size; | ||
1171 | areq_ctx->src.sram_addr = | ||
1172 | areq_ctx->dst.sram_addr + | ||
1173 | areq_ctx->dst.mlli_nents * | ||
1174 | LLI_ENTRY_BYTE_SIZE; | ||
1175 | if (!areq_ctx->is_single_pass) | ||
1176 | areq_ctx->assoc.mlli_nents += | ||
1177 | areq_ctx->dst.mlli_nents; | ||
1178 | } | ||
1179 | } | ||
1180 | } | ||
1181 | } | ||
1182 | |||
1183 | int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) | ||
1184 | { | ||
1185 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | ||
1186 | struct mlli_params *mlli_params = &areq_ctx->mlli_params; | ||
1187 | struct device *dev = drvdata_to_dev(drvdata); | ||
1188 | struct buffer_array sg_data; | ||
1189 | unsigned int authsize = areq_ctx->req_authsize; | ||
1190 | struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; | ||
1191 | int rc = 0; | ||
1192 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1193 | bool is_gcm4543 = areq_ctx->is_gcm4543; | ||
1194 | dma_addr_t dma_addr; | ||
1195 | u32 mapped_nents = 0; | ||
1196 | u32 dummy = 0; /*used for the assoc data fragments */ | ||
1197 | u32 size_to_map = 0; | ||
1198 | gfp_t flags = cc_gfp_flags(&req->base); | ||
1199 | |||
1200 | mlli_params->curr_pool = NULL; | ||
1201 | sg_data.num_of_buffers = 0; | ||
1202 | |||
1203 | /* copy the MAC to a temporary location to deal with possible | ||
1204 | * data overwrite caused by a cache coherency problem. | ||
1205 | */ | ||
1206 | if (drvdata->coherent && | ||
1207 | areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && | ||
1208 | req->src == req->dst) | ||
1209 | cc_copy_mac(dev, req, CC_SG_TO_BUF); | ||
1210 | |||
1211 | /* calculate the cipher size; remove the ICV in decrypt */ | ||
1212 | areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == | ||
1213 | DRV_CRYPTO_DIRECTION_ENCRYPT) ? | ||
1214 | req->cryptlen : | ||
1215 | (req->cryptlen - authsize); | ||
1216 | |||
1217 | dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE, | ||
1218 | DMA_BIDIRECTIONAL); | ||
1219 | if (dma_mapping_error(dev, dma_addr)) { | ||
1220 | dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", | ||
1221 | MAX_MAC_SIZE, areq_ctx->mac_buf); | ||
1222 | rc = -ENOMEM; | ||
1223 | goto aead_map_failure; | ||
1224 | } | ||
1225 | areq_ctx->mac_buf_dma_addr = dma_addr; | ||
1226 | |||
1227 | if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { | ||
1228 | void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; | ||
1229 | |||
1230 | dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE, | ||
1231 | DMA_TO_DEVICE); | ||
1232 | |||
1233 | if (dma_mapping_error(dev, dma_addr)) { | ||
1234 | dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", | ||
1235 | AES_BLOCK_SIZE, addr); | ||
1236 | areq_ctx->ccm_iv0_dma_addr = 0; | ||
1237 | rc = -ENOMEM; | ||
1238 | goto aead_map_failure; | ||
1239 | } | ||
1240 | areq_ctx->ccm_iv0_dma_addr = dma_addr; | ||
1241 | |||
1242 | if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, | ||
1243 | &sg_data, req->assoclen)) { | ||
1244 | rc = -ENOMEM; | ||
1245 | goto aead_map_failure; | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { | ||
1250 | dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE, | ||
1251 | DMA_BIDIRECTIONAL); | ||
1252 | if (dma_mapping_error(dev, dma_addr)) { | ||
1253 | dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n", | ||
1254 | AES_BLOCK_SIZE, areq_ctx->hkey); | ||
1255 | rc = -ENOMEM; | ||
1256 | goto aead_map_failure; | ||
1257 | } | ||
1258 | areq_ctx->hkey_dma_addr = dma_addr; | ||
1259 | |||
1260 | dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block, | ||
1261 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
1262 | if (dma_mapping_error(dev, dma_addr)) { | ||
1263 | dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n", | ||
1264 | AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); | ||
1265 | rc = -ENOMEM; | ||
1266 | goto aead_map_failure; | ||
1267 | } | ||
1268 | areq_ctx->gcm_block_len_dma_addr = dma_addr; | ||
1269 | |||
1270 | dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1, | ||
1271 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
1272 | |||
1273 | if (dma_mapping_error(dev, dma_addr)) { | ||
1274 | dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n", | ||
1275 | AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); | ||
1276 | areq_ctx->gcm_iv_inc1_dma_addr = 0; | ||
1277 | rc = -ENOMEM; | ||
1278 | goto aead_map_failure; | ||
1279 | } | ||
1280 | areq_ctx->gcm_iv_inc1_dma_addr = dma_addr; | ||
1281 | |||
1282 | dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2, | ||
1283 | AES_BLOCK_SIZE, DMA_TO_DEVICE); | ||
1284 | |||
1285 | if (dma_mapping_error(dev, dma_addr)) { | ||
1286 | dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n", | ||
1287 | AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); | ||
1288 | areq_ctx->gcm_iv_inc2_dma_addr = 0; | ||
1289 | rc = -ENOMEM; | ||
1290 | goto aead_map_failure; | ||
1291 | } | ||
1292 | areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; | ||
1293 | } | ||
1294 | |||
1295 | size_to_map = req->cryptlen + req->assoclen; | ||
1296 | if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) | ||
1297 | size_to_map += authsize; | ||
1298 | |||
1299 | if (is_gcm4543) | ||
1300 | size_to_map += crypto_aead_ivsize(tfm); | ||
1301 | rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, | ||
1302 | &areq_ctx->src.nents, | ||
1303 | (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + | ||
1304 | LLI_MAX_NUM_OF_DATA_ENTRIES), | ||
1305 | &dummy, &mapped_nents); | ||
1306 | if (rc) { | ||
1307 | rc = -ENOMEM; | ||
1308 | goto aead_map_failure; | ||
1309 | } | ||
1310 | |||
1311 | if (areq_ctx->is_single_pass) { | ||
1312 | /* | ||
1313 | * Create MLLI table for: | ||
1314 | * (1) Assoc. data | ||
1315 | * (2) Src/Dst SGLs | ||
1316 | * Note: the IV is a contiguous buffer (not an SGL) | ||
1317 | */ | ||
1318 | rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false); | ||
1319 | if (rc) | ||
1320 | goto aead_map_failure; | ||
1321 | rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false); | ||
1322 | if (rc) | ||
1323 | goto aead_map_failure; | ||
1324 | rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false); | ||
1325 | if (rc) | ||
1326 | goto aead_map_failure; | ||
1327 | } else { /* DOUBLE-PASS flow */ | ||
1328 | /* | ||
1329 | * Prepare MLLI table(s) in this order: | ||
1330 | * | ||
1331 | * If ENCRYPT/DECRYPT (inplace): | ||
1332 | * (1) MLLI table for assoc | ||
1333 | * (2) IV entry (chained right after end of assoc) | ||
1334 | * (3) MLLI for src/dst (inplace operation) | ||
1335 | * | ||
1336 | * If ENCRYPT (non-inplace) | ||
1337 | * (1) MLLI table for assoc | ||
1338 | * (2) IV entry (chained right after end of assoc) | ||
1339 | * (3) MLLI for dst | ||
1340 | * (4) MLLI for src | ||
1341 | * | ||
1342 | * If DECRYPT (non-inplace) | ||
1343 | * (1) MLLI table for assoc | ||
1344 | * (2) IV entry (chained right after end of assoc) | ||
1345 | * (3) MLLI for src | ||
1346 | * (4) MLLI for dst | ||
1347 | */ | ||
1348 | rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true); | ||
1349 | if (rc) | ||
1350 | goto aead_map_failure; | ||
1351 | rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true); | ||
1352 | if (rc) | ||
1353 | goto aead_map_failure; | ||
1354 | rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true); | ||
1355 | if (rc) | ||
1356 | goto aead_map_failure; | ||
1357 | } | ||
1358 | |||
1359 | /* MLLI support - start building the MLLI according to the above | ||
1360 | * results | ||
1361 | */ | ||
1362 | if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || | ||
1363 | areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { | ||
1364 | mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; | ||
1365 | rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); | ||
1366 | if (rc) | ||
1367 | goto aead_map_failure; | ||
1368 | |||
1369 | cc_update_aead_mlli_nents(drvdata, req); | ||
1370 | dev_dbg(dev, "assoc params mn %d\n", | ||
1371 | areq_ctx->assoc.mlli_nents); | ||
1372 | dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents); | ||
1373 | dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents); | ||
1374 | } | ||
1375 | return 0; | ||
1376 | |||
1377 | aead_map_failure: | ||
1378 | cc_unmap_aead_request(dev, req); | ||
1379 | return rc; | ||
1380 | } | ||
1381 | |||
500 | int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, | 1382 | int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, |
501 | struct scatterlist *src, unsigned int nbytes, | 1383 | struct scatterlist *src, unsigned int nbytes, |
502 | bool do_update, gfp_t flags) | 1384 | bool do_update, gfp_t flags) |