author    Yuan Kang <Yuan.Kang@freescale.com>    2011-07-14 23:21:42 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>    2011-07-14 23:21:42 -0400
commit    acdca31dba86c4f426460aa000d13930a00549b7 (patch)
tree      198f01a269af17eb0d201a00a980229acbc9a5a0 /drivers/crypto/caam
parent    1acebad3d8db8d5220b3010c2eb160c625434cf2 (diff)
crypto: caam - ablkcipher support
caam now supports encrypt and decrypt for aes, des and 3des

Signed-off-by: Yuan Kang <Yuan.Kang@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/caam')
 drivers/crypto/caam/caamalg.c | 510 ++++++++++++++++++++++++++++++++++++++++++
 drivers/crypto/caam/compat.h  |   1 +
 2 files changed, 511 insertions(+), 0 deletions(-)
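To put the new templates in context: a kernel-side user reaches them through the generic ablkcipher API, with lookup by name ("cbc(aes)", "cbc(des3_ede)", "cbc(des)") selecting the CAAM-backed implementation by priority once this driver is registered. The sketch below is not part of the patch; it is a minimal, era-appropriate example of driving one of these ciphers, and the demo_* names are hypothetical.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* async completion callback; fires from the job ring interrupt path */
static void demo_complete(struct crypto_async_request *areq, int err)
{
	struct demo_result *res = areq->data;

	if (err == -EINPROGRESS)	/* backlog notification, keep waiting */
		return;
	res->err = err;
	complete(&res->completion);
}

/* in-place CBC-AES encrypt of a linear buffer (hypothetical helper) */
static int demo_cbc_aes_encrypt(u8 *buf, unsigned int nbytes,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct demo_result result;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&result.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_complete, &result);
	sg_init_one(&sg, buf, nbytes);
	ablkcipher_request_set_crypt(req, &sg, &sg, nbytes, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&result.completion);
		ret = result.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}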
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ed7d59d168af..4159265b453b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -69,6 +69,12 @@
 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
 
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN	(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN	(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
 #define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
 					 CAAM_MAX_KEY_SIZE)
 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -132,6 +138,19 @@ static inline void aead_append_ld_iv(u32 *desc, int ivsize)
 }
 
 /*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
  * If all data, including src (with assoc and iv) or dst (with iv only) are
  * contiguous
  */
@@ -625,6 +644,119 @@ badkey:
 	return -EINVAL;
 }
 
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+	u32 *key_jump_cmd, *jump_cmd;
+	u32 *desc;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->enckeylen = keylen;
+
+	/* ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Propagate errors from shared to job descriptor */
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+
+	/* Load IV */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | tfm->ivsize);
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+	/* ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* Only propagate error immediately if shared */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, key_jump_cmd);
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	/* Load IV */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | tfm->ivsize);
+
+	/* Choose operation */
+	append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	/* Wait for key to load before allowing error propagation */
+	append_dec_shr_done(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return ret;
+}
+
 struct link_tbl_entry {
 	u64 ptr;
 	u32 len;
@@ -655,6 +787,26 @@ struct aead_edesc {
 	u32 hw_desc[0];
 };
 
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @link_tbl: pointer to the h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int link_tbl_bytes;
+	dma_addr_t link_tbl_dma;
+	struct link_tbl_entry *link_tbl;
+	u32 hw_desc[0];
+};
+
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents, int dst_nents,
 		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
@@ -689,6 +841,19 @@ static void aead_unmap(struct device *dev,
 			   edesc->link_tbl_bytes);
 }
 
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+		   edesc->link_tbl_bytes);
+}
+
 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 			      void *context)
 {
@@ -790,6 +955,77 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	aead_request_complete(req, err);
 }
 
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct ablkcipher_edesc *)((char *)desc -
+		 offsetof(struct ablkcipher_edesc, hw_desc));
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct ablkcipher_edesc *)((char *)desc -
+		 offsetof(struct ablkcipher_edesc, hw_desc));
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
 static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
 			       dma_addr_t dma, u32 len, u32 offset)
 {
@@ -978,6 +1214,63 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 }
 
 /*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+				struct ablkcipher_edesc *edesc,
+				struct ablkcipher_request *req,
+				bool iv_contig)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, link_tbl_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (iv_contig) {
+		src_dma = edesc->iv_dma;
+		in_options = 0;
+	} else {
+		src_dma = edesc->link_tbl_dma;
+		link_tbl_index += 1 + edesc->src_nents; /* IV entry + src entries */
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+
+	if (likely(req->src == req->dst)) {
+		if (!edesc->src_nents && iv_contig) {
+			dst_dma = sg_dma_address(req->src);
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				  sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	} else {
+		if (!edesc->dst_nents) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				  link_tbl_index * sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+}
+
+/*
  * derive number of elements in scatterlist
  */
 static int sg_count(struct scatterlist *sg_list, int nbytes)
@@ -1327,7 +1620,171 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	return ret;
 }
 
+/*
+ * allocate and map the ablkcipher extended descriptor
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, int desc_bytes,
+						       bool *iv_contig_out)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, dst_nents = 0, link_tbl_bytes;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	bool iv_contig = false;
+	int sgc;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int link_tbl_index;
+
+	src_nents = sg_count(req->src, req->nbytes);
+
+	if (unlikely(req->dst != req->src))
+		dst_nents = sg_count(req->dst, req->nbytes);
+
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
+	} else {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
+	}
+
+	/*
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
+	 */
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+		iv_contig = true;
+	else
+		src_nents = src_nents ? : 1;
+	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			 sizeof(struct link_tbl_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+			link_tbl_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->link_tbl_bytes = link_tbl_bytes;
+	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+			  desc_bytes;
+
+	link_tbl_index = 0;
+	if (!iv_contig) {
+		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
+		sg_to_link_tbl_last(req->src, src_nents,
+				    edesc->link_tbl + 1, 0);
+		link_tbl_index += 1 + src_nents;
+	}
+
+	if (unlikely(dst_nents)) {
+		sg_to_link_tbl_last(req->dst, dst_nents,
+				    edesc->link_tbl + link_tbl_index, 0);
+	}
+
+	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+					     link_tbl_bytes, DMA_TO_DEVICE);
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
+		       link_tbl_bytes, 1);
+#endif
+
+	*iv_contig_out = iv_contig;
+	return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_job(ctx->sh_desc_enc,
+			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_job(ctx->sh_desc_dec,
+			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+	desc = edesc->hw_desc;
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
 #define template_aead		template_u.aead
+#define template_ablkcipher	template_u.ablkcipher
 struct caam_alg_template {
 	char name[CRYPTO_MAX_ALG_NAME];
 	char driver_name[CRYPTO_MAX_ALG_NAME];
@@ -1525,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = {
 			   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
+	/* ablkcipher descriptors */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	}
 };
 
 struct caam_crypto_alg {
@@ -1644,6 +2150,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 	alg->cra_ctxsize = sizeof(struct caam_ctx);
 	alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
 	switch (template->type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
 	case CRYPTO_ALG_TYPE_AEAD:
 		alg->cra_type = &crypto_aead_type;
 		alg->cra_aead = template->template_aead;
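As a worked example (not part of the patch), the sizing arithmetic in ablkcipher_edesc_alloc() and init_ablkcipher_job() can be traced once for a hypothetical in-place AES-CBC request: 64 bytes of payload in a two-segment source scatterlist, with an IV that is not adjacent to the data, assuming the 16-byte hardware link-table entry declared above.

#include <stdio.h>

#define LINK_TBL_ENTRY_SZ 16	/* assumed sizeof(struct link_tbl_entry) */

int main(void)
{
	int ivsize = 16;	/* AES_BLOCK_SIZE */
	int nbytes = 64;	/* req->nbytes */
	int src_nents = 2;	/* sg_count(req->src, nbytes) */
	int dst_nents = 0;	/* in-place: req->dst == req->src */
	int iv_contig = 0;	/* IV not contiguous with the payload */

	/* ablkcipher_edesc_alloc(): one extra entry carries the IV */
	int link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			     LINK_TBL_ENTRY_SZ;

	/* init_ablkcipher_job(): the IV is prepended to the input sequence,
	 * so seq-in covers iv + payload while seq-out covers payload only */
	int seq_in_len = nbytes + ivsize;
	int seq_out_len = nbytes;

	printf("link_tbl_bytes=%d seq_in=%d seq_out=%d\n",
	       link_tbl_bytes, seq_in_len, seq_out_len);
	/* prints: link_tbl_bytes=48 seq_in=80 seq_out=64 */
	return 0;
}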
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 950450346f70..d38f2afaa966 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -31,5 +31,6 @@
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
 
 #endif /* !defined(CAAM_COMPAT_H) */
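Finally, as a reading aid rather than part of the patch, the encrypt shared descriptor assembled by ablkcipher_setkey() reduces to the command sequence below, roughly one line per append_*() call; the note that REG0 holds zero is an assumption about initial MATH register state.

/*
 * Encrypt shared descriptor built by ablkcipher_setkey():
 *
 *   SHR HDR        HDR_SHARE_WAIT
 *   JUMP           skip the key load if the descriptor is already shared
 *   KEY            class 1 key, inlined as immediate data
 *   LOAD           SET_OK_PROP_ERRORS, propagate shared-desc status to the job
 *   SEQ LOAD       ivsize bytes into the class 1 context register (the IV)
 *   OPERATION      class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT
 *   MATH           VARSEQOUTLEN = SEQINLEN + REG0   (REG0 assumed zero)
 *   MATH           VARSEQINLEN  = SEQINLEN + REG0
 *   SEQ FIFO LOAD  variable-length (VLF) message data into class 1
 *   SEQ FIFO STORE variable-length (VLF) message data out
 *
 * The decrypt descriptor differs in its error-propagation jump sequence,
 * selects the decrypt operation via append_dec_op1(), and finishes with
 * append_dec_shr_done().
 */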