author		Horia Geantă <horia.geanta@nxp.com>	2018-09-12 04:59:34 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-09-21 01:24:51 -0400
commit		226853ac3ebee425300b9a692c9cd5f9e2e72571 (patch)
tree		a4ef584dff4738c75d6146e8c61e3e2bf7c85a13 /drivers/crypto/caam
parent		8d818c1055013d355d36188f21c7535687374f6c (diff)
crypto: caam/qi2 - add skcipher algorithms
Add support to submit the following skcipher algorithms via the DPSECI backend:
cbc({aes,des,des3_ede}), ctr(aes), rfc3686(ctr(aes)), xts(aes)

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
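For context (not part of the patch): once these transformations are registered, any kernel user reaches them through the generic skcipher API; the driver only changes which implementation backs a given cra_name. The sketch below shows roughly how "cbc(aes)" (served here by "cbc-aes-caam-qi2" when it has the highest priority) could be exercised with the asynchronous request interface. The function name, key/IV values and buffer sizes are illustrative placeholders, not code from this commit.

/* Illustrative usage sketch, not part of this patch. */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes_one_shot(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* placeholder IV */
	u8 *buf;
	int err;

	/* Picks the highest-priority "cbc(aes)" provider in the system. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* DMA-capable backends must not be handed stack memory. */
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* The request completes asynchronously; wait for the callback. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}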
Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--	drivers/crypto/caam/Kconfig	1
-rw-r--r--	drivers/crypto/caam/caamalg_qi2.c	562
-rw-r--r--	drivers/crypto/caam/caamalg_qi2.h	20
3 files changed, 582 insertions, 1 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index a186b853b05c..8d162cb34591 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -157,6 +157,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
 	tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
 	depends on FSL_MC_DPIO
 	select CRYPTO_DEV_FSL_CAAM_COMMON
+	select CRYPTO_BLKCIPHER
 	select CRYPTO_AUTHENC
 	select CRYPTO_AEAD
 	help
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 9d97a51070b1..5bb77c778da6 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -56,6 +56,12 @@ struct caam_aead_alg {
 	bool registered;
 };
 
+struct caam_skcipher_alg {
+	struct skcipher_alg skcipher;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
 /**
  * caam_ctx - per-session context
  * @flc: Flow Contexts array
@@ -794,6 +800,248 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 	return rfc4543_set_sh_desc(aead);
 }
 
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_skcipher_alg *alg =
+		container_of(crypto_skcipher_alg(skcipher),
+			     struct caam_skcipher_alg, skcipher);
+	struct device *dev = ctx->dev;
+	struct caam_flc *flc;
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+	u32 *desc;
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+
+	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* skcipher_encrypt shared descriptor */
+	flc = &ctx->flc[ENCRYPT];
+	desc = flc->sh_desc;
+	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				   ctx1_iv_off);
+	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+				   sizeof(flc->flc) + desc_bytes(desc),
+				   ctx->dir);
+
+	/* skcipher_decrypt shared descriptor */
+	flc = &ctx->flc[DECRYPT];
+	desc = flc->sh_desc;
+	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				   ctx1_iv_off);
+	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+				   sizeof(flc->flc) + desc_bytes(desc),
+				   ctx->dir);
+
+	return 0;
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+			       unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct device *dev = ctx->dev;
+	struct caam_flc *flc;
+	u32 *desc;
+
+	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+		dev_err(dev, "key size mismatch\n");
+		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* xts_skcipher_encrypt shared descriptor */
+	flc = &ctx->flc[ENCRYPT];
+	desc = flc->sh_desc;
+	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+				   sizeof(flc->flc) + desc_bytes(desc),
+				   ctx->dir);
+
+	/* xts_skcipher_decrypt shared descriptor */
+	flc = &ctx->flc[DECRYPT];
+	desc = flc->sh_desc;
+	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+				   sizeof(flc->flc) + desc_bytes(desc),
+				   ctx->dir);
+
+	return 0;
+}
+
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_request *req_ctx = skcipher_request_ctx(req);
+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct device *dev = ctx->dev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct skcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+	struct dpaa2_sg_entry *sg_table;
+
+	src_nents = sg_nents_for_len(req->src, req->cryptlen);
+	if (unlikely(src_nents < 0)) {
+		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+			req->cryptlen);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->dst != req->src)) {
+		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+				req->cryptlen);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(dev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(dev, "unable to map destination\n");
+			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(dev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	qm_sg_ents = 1 + mapped_src_nents;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
+	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_zalloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(dev, "could not allocate extended descriptor\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	memcpy(iv, req->iv, ivsize);
+
+	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, iv_dma)) {
+		dev_err(dev, "unable to map IV\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+
+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 dst_sg_idx, 0);
+
+	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+		dev_err(dev, "unable to map S/G table\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+	dpaa2_fl_set_final(in_fle, true);
+	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
+	dpaa2_fl_set_len(out_fle, req->cryptlen);
+
+	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+
+	if (req->src == req->dst) {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+				  sizeof(*sg_table));
+	} else if (mapped_dst_nents > 1) {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+				  sizeof(*sg_table));
+	} else {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+	}
+
+	return edesc;
+}
+
 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 		       struct aead_request *req)
 {
@@ -805,6 +1053,16 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
 
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+			   struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
 static void aead_encrypt_done(void *cbk_ctx, u32 status)
 {
 	struct crypto_async_request *areq = cbk_ctx;
@@ -930,6 +1188,138 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
 	return aead_decrypt(req);
 }
 
+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
+{
+	struct crypto_async_request *areq = cbk_ctx;
+	struct skcipher_request *req = skcipher_request_cast(areq);
+	struct caam_request *req_ctx = to_caam_req(areq);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct skcipher_edesc *edesc = req_ctx->edesc;
+	int ecode = 0;
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+	if (unlikely(status)) {
+		caam_qi2_strstatus(ctx->dev, status);
+		ecode = -EIO;
+	}
+
+	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+			     edesc->src_nents > 1 ? 100 : ivsize, 1);
+	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+	skcipher_unmap(ctx->dev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
+				 ivsize, 0);
+
+	qi_cache_free(edesc);
+	skcipher_request_complete(req, ecode);
+}
+
+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+{
+	struct crypto_async_request *areq = cbk_ctx;
+	struct skcipher_request *req = skcipher_request_cast(areq);
+	struct caam_request *req_ctx = to_caam_req(areq);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct skcipher_edesc *edesc = req_ctx->edesc;
+	int ecode = 0;
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+	if (unlikely(status)) {
+		caam_qi2_strstatus(ctx->dev, status);
+		ecode = -EIO;
+	}
+
+	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+			     edesc->src_nents > 1 ? 100 : ivsize, 1);
+	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+	skcipher_unmap(ctx->dev, edesc, req);
+	qi_cache_free(edesc);
+	skcipher_request_complete(req, ecode);
+}
+
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+	struct skcipher_edesc *edesc;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_request *caam_req = skcipher_request_ctx(req);
+	int ret;
+
+	/* allocate extended descriptor */
+	edesc = skcipher_edesc_alloc(req);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	caam_req->flc = &ctx->flc[ENCRYPT];
+	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+	caam_req->cbk = skcipher_encrypt_done;
+	caam_req->ctx = &req->base;
+	caam_req->edesc = edesc;
+	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+	if (ret != -EINPROGRESS &&
+	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+		skcipher_unmap(ctx->dev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+	struct skcipher_edesc *edesc;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_request *caam_req = skcipher_request_ctx(req);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int ret;
+
+	/* allocate extended descriptor */
+	edesc = skcipher_edesc_alloc(req);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block.
+	 */
+	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
+				 ivsize, 0);
+
+	caam_req->flc = &ctx->flc[DECRYPT];
+	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+	caam_req->cbk = skcipher_decrypt_done;
+	caam_req->ctx = &req->base;
+	caam_req->edesc = edesc;
+	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+	if (ret != -EINPROGRESS &&
+	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+		skcipher_unmap(ctx->dev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 			 bool uses_dkp)
 {
@@ -958,6 +1348,16 @@ static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 	return 0;
 }
 
+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+{
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct caam_skcipher_alg *caam_alg =
+		container_of(alg, typeof(*caam_alg), skcipher);
+
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+}
+
 static int caam_cra_init_aead(struct crypto_aead *tfm)
 {
 	struct aead_alg *alg = crypto_aead_alg(tfm);
@@ -976,11 +1376,124 @@ static void caam_exit_common(struct caam_ctx *ctx)
 				   DMA_ATTR_SKIP_CPU_SYNC);
 }
 
+static void caam_cra_exit(struct crypto_skcipher *tfm)
+{
+	caam_exit_common(crypto_skcipher_ctx(tfm));
+}
+
 static void caam_cra_exit_aead(struct crypto_aead *tfm)
 {
 	caam_exit_common(crypto_aead_ctx(tfm));
 }
 
+static struct caam_skcipher_alg driver_algs[] = {
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "cbc(aes)",
+				.cra_driver_name = "cbc-aes-caam-qi2",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "cbc(des3_ede)",
+				.cra_driver_name = "cbc-3des-caam-qi2",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "cbc(des)",
+				.cra_driver_name = "cbc-des-caam-qi2",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ctr(aes)",
+				.cra_driver_name = "ctr-aes-caam-qi2",
+				.cra_blocksize = 1,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+					OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "rfc3686(ctr(aes))",
+				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
+				.cra_blocksize = 1,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "xts(aes)",
+				.cra_driver_name = "xts-aes-caam-qi2",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = xts_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	}
+};
+
 static struct caam_aead_alg driver_aeads[] = {
 	{
 		.aead = {
@@ -2143,6 +2656,19 @@ static struct caam_aead_alg driver_aeads[] = {
 	},
 };
 
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+{
+	struct skcipher_alg *alg = &t_alg->skcipher;
+
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	alg->init = caam_cra_init_skcipher;
+	alg->exit = caam_cra_exit;
+}
+
 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 {
 	struct aead_alg *alg = &t_alg->aead;
@@ -2701,6 +3227,35 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
 	}
 
 	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
+		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!priv->sec_attr.des_acc_num &&
+		    (alg_sel == OP_ALG_ALGSEL_3DES ||
+		     alg_sel == OP_ALG_ALGSEL_DES))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!priv->sec_attr.aes_acc_num &&
+		    alg_sel == OP_ALG_ALGSEL_AES)
+			continue;
+
+		t_alg->caam.dev = dev;
+		caam_skcipher_alg_init(t_alg);
+
+		err = crypto_register_skcipher(&t_alg->skcipher);
+		if (err) {
+			dev_warn(dev, "%s alg registration failed: %d\n",
+				 t_alg->skcipher.base.cra_driver_name, err);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
@@ -2774,6 +3329,13 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
 		crypto_unregister_aead(&t_alg->aead);
 	}
 
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+		if (t_alg->registered)
+			crypto_unregister_skcipher(&t_alg->skcipher);
+	}
+
 	dpaa2_dpseci_disable(priv);
 	dpaa2_dpseci_dpio_free(priv);
 	dpaa2_dpseci_free(priv);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 811d88e10918..71a08330eb09 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -140,6 +140,24 @@ struct aead_edesc {
 	struct dpaa2_sg_entry sgt[0];
 };
 
+/*
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table, followed by IV
+ */
+struct skcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+	struct dpaa2_sg_entry sgt[0];
+};
+
 /**
  * caam_flc - Flow Context (FLC)
  * @flc: Flow Context options
@@ -167,7 +185,7 @@ enum optype {
  * @flc_dma: I/O virtual address of Flow Context
  * @cbk: Callback function to invoke when job is completed
  * @ctx: arbit context attached with request by the application
- * @edesc: extended descriptor; points to aead_edesc
+ * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
  */
 struct caam_request {
 	struct dpaa2_fl_entry fd_flt[2];