 drivers/crypto/talitos.c | 380 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 342 insertions(+), 38 deletions(-)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9833961a247e..a0b0a6319088 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -44,6 +44,8 @@
 #include <crypto/sha.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
 
 #include "talitos.h"
 
@@ -755,12 +757,62 @@ badkey:
 struct talitos_edesc {
 	int src_nents;
 	int dst_nents;
+	int src_is_chained;
+	int dst_is_chained;
 	int dma_len;
 	dma_addr_t dma_link_tbl;
 	struct talitos_desc desc;
 	struct talitos_ptr link_tbl[0];
 };
 
+static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
+			  unsigned int nents, enum dma_data_direction dir,
+			  int chained)
+{
+	if (unlikely(chained))
+		while (sg) {
+			dma_map_sg(dev, sg, 1, dir);
+			sg = scatterwalk_sg_next(sg);
+		}
+	else
+		dma_map_sg(dev, sg, nents, dir);
+	return nents;
+}
+
+static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
+				   enum dma_data_direction dir)
+{
+	while (sg) {
+		dma_unmap_sg(dev, sg, 1, dir);
+		sg = scatterwalk_sg_next(sg);
+	}
+}
+
+static void talitos_sg_unmap(struct device *dev,
+			     struct talitos_edesc *edesc,
+			     struct scatterlist *src,
+			     struct scatterlist *dst)
+{
+	unsigned int src_nents = edesc->src_nents ? : 1;
+	unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+	if (src != dst) {
+		if (edesc->src_is_chained)
+			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
+		else
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+		if (edesc->dst_is_chained)
+			talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
+		else
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else
+		if (edesc->src_is_chained)
+			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
+		else
+			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+}
+
 static void ipsec_esp_unmap(struct device *dev,
 			    struct talitos_edesc *edesc,
 			    struct aead_request *areq)
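The chained case exists because callers (IPsec in particular) may link several scatterlist tables together, and dma_map_sg() at the time would not cross a chain link, so a chained list is mapped one entry at a time. Below is a minimal userspace model of that split, assuming simplified stand-in types (sg_entry, map_one are hypothetical) rather than the kernel's real scatterlist and DMA API:

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified stand-ins for the kernel's scatterlist and DMA
	 * mapping; illustrative only. */
	struct sg_entry {
		size_t length;
		struct sg_entry *next;	/* models a chain link */
	};

	static void map_one(struct sg_entry *sg)
	{
		printf("mapped segment of %zu bytes\n", sg->length);
	}

	/* Mirrors the talitos_map_sg() split: a chained list is walked
	 * entry by entry, a flat array could be mapped in one call. */
	static int map_sg_model(struct sg_entry *sg, unsigned int nents,
				int chained)
	{
		unsigned int i;

		if (chained) {
			while (sg) {
				map_one(sg);
				sg = sg->next;
			}
		} else {
			for (i = 0; i < nents; i++)
				map_one(&sg[i]);
		}
		return nents;
	}

	int main(void)
	{
		struct sg_entry tail = { 64, NULL };
		struct sg_entry chain[2] = { { 128, &chain[1] },
					     { 32, &tail } };

		map_sg_model(chain, 3, 1);
		return 0;
	}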
@@ -772,15 +824,7 @@ static void ipsec_esp_unmap(struct device *dev,
 
 	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
 
-	if (areq->src != areq->dst) {
-		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-			     DMA_FROM_DEVICE);
-	} else {
-		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
-			     DMA_BIDIRECTIONAL);
-	}
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 
 	if (edesc->dma_len)
 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -886,7 +930,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 		link_tbl_ptr->j_extent = 0;
 		link_tbl_ptr++;
 		cryptlen -= sg_dma_len(sg);
-		sg = sg_next(sg);
+		sg = scatterwalk_sg_next(sg);
 	}
 
 	/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -952,12 +996,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	desc->ptr[4].len = cpu_to_be16(cryptlen);
 	desc->ptr[4].j_extent = authsize;
 
-	if (areq->src == areq->dst)
-		sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-				      DMA_BIDIRECTIONAL);
-	else
-		sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-				      DMA_TO_DEVICE);
+	sg_count = talitos_map_sg(dev, areq->src,
+				  edesc->src_nents ? : 1,
+				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							     DMA_TO_DEVICE,
+				  edesc->src_is_chained);
 
 	if (sg_count == 1) {
 		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
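When the source spans more than one mapped segment, ptr[4] is redirected through a link table built by sg_to_link_tbl(), with the last entry trimmed so the table covers exactly cryptlen bytes. A rough userspace model of that sizing, with toy types in place of struct talitos_ptr and the trimming folded into the loop (the real helper also handles the "last one (or two)" adjustment and the return/jump bits):

	#include <stdio.h>

	/* Toy model of the link-table build; not the hardware layout. */
	struct link_entry {
		unsigned int len;
		int last;
	};

	static int to_link_tbl_model(const unsigned int *seg_len,
				     int sg_count, int cryptlen,
				     struct link_entry *tbl)
	{
		int i, n = 0;

		for (i = 0; i < sg_count && cryptlen > 0; i++, n++) {
			/* each entry covers its segment, capped at what
			 * remains of cryptlen */
			tbl[n].len = seg_len[i] < (unsigned int)cryptlen ?
				     seg_len[i] : (unsigned int)cryptlen;
			tbl[n].last = 0;
			cryptlen -= seg_len[i];
		}
		tbl[n - 1].last = 1;	/* mark the final entry */
		return n;
	}

	int main(void)
	{
		unsigned int segs[3] = { 1024, 1024, 1024 };
		struct link_entry tbl[3];
		int i, n = to_link_tbl_model(segs, 3, 2500, tbl);

		for (i = 0; i < n; i++)
			printf("entry %d: len=%u last=%d\n",
			       i, tbl[i].len, tbl[i].last);
		return 0;
	}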
@@ -986,8 +1029,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	desc->ptr[5].j_extent = authsize;
 
 	if (areq->src != areq->dst) {
-		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-				      DMA_FROM_DEVICE);
+		sg_count = talitos_map_sg(dev, areq->dst,
+					  edesc->dst_nents ? : 1,
+					  DMA_FROM_DEVICE,
+					  edesc->dst_is_chained);
 	}
 
 	if (sg_count == 1) {
@@ -1037,15 +1082,18 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 /*
  * derive number of elements in scatterlist
  */
-static int sg_count(struct scatterlist *sg_list, int nbytes)
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
 {
 	struct scatterlist *sg = sg_list;
 	int sg_nents = 0;
 
-	while (nbytes) {
+	*chained = 0;
+	while (nbytes > 0) {
 		sg_nents++;
 		nbytes -= sg->length;
-		sg = sg_next(sg);
+		if (!sg_is_last(sg) && (sg + 1)->length == 0)
+			*chained = 1;
+		sg = scatterwalk_sg_next(sg);
 	}
 
 	return sg_nents;
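The chain test relies on the scatterlist array layout: a chain link occupies a slot of its own whose length is zero, so peeking at the next slot while walking reveals that the list continues in another table. A self-contained model of that walk, with sg_model and next_entry as simplified stand-ins for struct scatterlist and scatterwalk_sg_next():

	#include <stdio.h>

	/* Simplified stand-in for struct scatterlist. */
	struct sg_model {
		unsigned int length;
		int is_last;
		struct sg_model *chain;	/* set when this slot is a link */
	};

	/* scatterwalk_sg_next() analogue: follow a chain link if the
	 * next slot is one, else step to the next array slot. */
	static struct sg_model *next_entry(struct sg_model *sg)
	{
		if (sg->is_last)
			return NULL;
		return (sg + 1)->chain ? (sg + 1)->chain : sg + 1;
	}

	static int count_model(struct sg_model *sg, int nbytes, int *chained)
	{
		int nents = 0;

		*chained = 0;
		while (nbytes > 0 && sg) {
			nents++;
			nbytes -= sg->length;
			/* a zero-length next slot marks a chain link */
			if (!sg->is_last && (sg + 1)->length == 0)
				*chained = 1;
			sg = next_entry(sg);
		}
		return nents;
	}

	int main(void)
	{
		struct sg_model tbl2[1] = { { 512, 1, NULL } };
		struct sg_model tbl1[2] = { { 512, 0, NULL },
					    { 0, 0, tbl2 } };
		int chained, n = count_model(tbl1, 1024, &chained);

		printf("nents=%d chained=%d\n", n, chained);
		return 0;
	}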
@@ -1054,28 +1102,32 @@ static int sg_count(struct scatterlist *sg_list, int nbytes)
 /*
  * allocate and map the extended descriptor
  */
-static struct talitos_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
-						   int icv_stashing)
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+						 struct scatterlist *src,
+						 struct scatterlist *dst,
+						 unsigned int cryptlen,
+						 unsigned int authsize,
+						 int icv_stashing,
+						 u32 cryptoflags)
 {
-	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	struct talitos_edesc *edesc;
 	int src_nents, dst_nents, alloc_len, dma_len;
-	gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+	int src_chained, dst_chained = 0;
+	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		      GFP_ATOMIC;
 
-	if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
-		dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
+	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
+	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
 	src_nents = (src_nents == 1) ? 0 : src_nents;
 
-	if (areq->dst == areq->src) {
+	if (dst == src) {
 		dst_nents = src_nents;
 	} else {
-		dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
+		dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
@@ -1087,28 +1139,41 @@ static struct talitos_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
 	alloc_len = sizeof(struct talitos_edesc);
 	if (src_nents || dst_nents) {
 		dma_len = (src_nents + dst_nents + 2) *
-			  sizeof(struct talitos_ptr) + ctx->authsize;
+			  sizeof(struct talitos_ptr) + authsize;
 		alloc_len += dma_len;
 	} else {
 		dma_len = 0;
-		alloc_len += icv_stashing ? ctx->authsize : 0;
+		alloc_len += icv_stashing ? authsize : 0;
 	}
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		dev_err(ctx->dev, "could not allocate edescriptor\n");
+		dev_err(dev, "could not allocate edescriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
+	edesc->src_is_chained = src_chained;
+	edesc->dst_is_chained = dst_chained;
 	edesc->dma_len = dma_len;
-	edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
+	edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
 					     edesc->dma_len, DMA_BIDIRECTIONAL);
 
 	return edesc;
 }
 
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+					      int icv_stashing)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+				   areq->cryptlen, ctx->authsize, icv_stashing,
+				   areq->base.flags);
+}
+
 static int aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
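With the request-type specifics (device, scatterlists, lengths, flags) hoisted into parameters, the same allocator now serves both the AEAD and ablkcipher paths; only the size arithmetic matters. A small model of that arithmetic, with PTR_SIZE and EDESC_SIZE as assumed placeholder sizes rather than the driver's real struct sizes:

	#include <stdio.h>

	/* Model of the sizing in talitos_edesc_alloc(): single-segment
	 * src and dst need no link table; otherwise the trailing array
	 * holds (src_nents + dst_nents + 2) pointer entries plus
	 * authsize bytes for the stashed ICV. */
	#define PTR_SIZE	8	/* assumed sizeof(struct talitos_ptr) */
	#define EDESC_SIZE	64	/* assumed base edesc size */

	static int edesc_len_model(int src_nents, int dst_nents,
				   unsigned int authsize, int icv_stashing)
	{
		int alloc_len = EDESC_SIZE;

		if (src_nents || dst_nents)
			alloc_len += (src_nents + dst_nents + 2) * PTR_SIZE +
				     authsize;
		else
			alloc_len += icv_stashing ? authsize : 0;
		return alloc_len;
	}

	int main(void)
	{
		/* one flat segment each: nents normalised to 0 */
		printf("flat:    %d bytes\n", edesc_len_model(0, 0, 12, 1));
		/* scattered request: 3 src + 2 dst segments */
		printf("scatter: %d bytes\n", edesc_len_model(3, 2, 12, 0));
		return 0;
	}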
@@ -1116,7 +1181,7 @@ static int aead_encrypt(struct aead_request *req)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ipsec_esp_edesc_alloc(req, 0);
+	edesc = aead_edesc_alloc(req, 0);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1141,7 +1206,7 @@ static int aead_decrypt(struct aead_request *req)
 	req->cryptlen -= authsize;
 
 	/* allocate extended descriptor */
-	edesc = ipsec_esp_edesc_alloc(req, 1);
+	edesc = aead_edesc_alloc(req, 1);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1188,7 +1253,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ipsec_esp_edesc_alloc(areq, 0);
+	edesc = aead_edesc_alloc(areq, 0);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1203,6 +1268,199 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
 				 ipsec_esp_encrypt_done);
 }
 
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
+
+	if (keylen > TALITOS_MAX_KEY_SIZE)
+		goto badkey;
+
+	if (keylen < alg->min_keysize || keylen > alg->max_keysize)
+		goto badkey;
+
+	memcpy(&ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+
+badkey:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+				  struct talitos_edesc *edesc,
+				  struct ablkcipher_request *areq)
+{
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+}
+
+static void ablkcipher_done(struct device *dev,
+			    struct talitos_desc *desc, void *context,
+			    int err)
+{
+	struct ablkcipher_request *areq = context;
+	struct talitos_edesc *edesc =
+		 container_of(desc, struct talitos_edesc, desc);
+
+	common_nonsnoop_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+			   struct ablkcipher_request *areq,
+			   u8 *giv,
+			   void (*callback) (struct device *dev,
+					     struct talitos_desc *desc,
+					     void *context, int error))
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	unsigned int cryptlen = areq->nbytes;
+	unsigned int ivsize;
+	int sg_count, ret;
+
+	/* first DWORD empty */
+	desc->ptr[0].len = 0;
+	desc->ptr[0].ptr = 0;
+	desc->ptr[0].j_extent = 0;
+
+	/* cipher iv */
+	ivsize = crypto_ablkcipher_ivsize(cipher);
+	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+			       DMA_TO_DEVICE);
+
+	/* cipher key */
+	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
+
+	/*
+	 * cipher in
+	 */
+	desc->ptr[3].len = cpu_to_be16(cryptlen);
+	desc->ptr[3].j_extent = 0;
+
+	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+							   : DMA_TO_DEVICE,
+				  edesc->src_is_chained);
+
+	if (sg_count == 1) {
+		desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+	} else {
+		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+					  &edesc->link_tbl[0]);
+		if (sg_count > 1) {
+			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+			desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
+			dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
+						   edesc->dma_len, DMA_BIDIRECTIONAL);
+		} else {
+			/* Only one segment now, so no link tbl needed */
+			desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+		}
+	}
+
+	/* cipher out */
+	desc->ptr[4].len = cpu_to_be16(cryptlen);
+	desc->ptr[4].j_extent = 0;
+
+	if (areq->src != areq->dst)
+		sg_count = talitos_map_sg(dev, areq->dst,
+					  edesc->dst_nents ? : 1,
+					  DMA_FROM_DEVICE,
+					  edesc->dst_is_chained);
+
+	if (sg_count == 1) {
+		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+	} else {
+		struct talitos_ptr *link_tbl_ptr =
+			&edesc->link_tbl[edesc->src_nents + 1];
+
+		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+		desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
+					       edesc->dma_link_tbl +
+					       edesc->src_nents + 1);
+		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+					  link_tbl_ptr);
+		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
+					   edesc->dma_len, DMA_BIDIRECTIONAL);
+	}
+
+	/* iv out */
+	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
+			       DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+	desc->ptr[6].len = 0;
+	desc->ptr[6].ptr = 0;
+	desc->ptr[6].j_extent = 0;
+
+	ret = talitos_submit(dev, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
+				   0, 0, areq->base.flags);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* set encrypt */
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
 struct talitos_alg_template {
 	struct crypto_alg alg;
 	__be32 desc_hdr_template;
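common_nonsnoop() fills the seven descriptor pointers in a fixed role order: empty, IV in, key, data in, data out, IV out, empty. Encrypt and decrypt then differ only in the header word: both start from the per-algorithm template, with encrypt OR-ing in the encrypt mode bit and decrypt marking the descriptor inbound instead. A toy model of that composition, using placeholder bit values rather than the real talitos.h constants:

	#include <stdio.h>

	/* Illustrative placeholder values, not the talitos.h constants. */
	#define HDR_TEMPLATE_AES_CBC	0x60000002u	/* assumed template */
	#define HDR_MODE0_ENCRYPT	0x00100000u	/* assumed mode bit */
	#define HDR_DIR_INBOUND		0x00000040u	/* assumed dir bit */

	static unsigned int build_hdr(unsigned int tmpl, int encrypt)
	{
		/* encrypt sets the encrypt mode bit; decrypt marks the
		 * descriptor inbound, mirroring the two entry points */
		return encrypt ? (tmpl | HDR_MODE0_ENCRYPT)
			       : (tmpl | HDR_DIR_INBOUND);
	}

	int main(void)
	{
		printf("encrypt hdr: 0x%08x\n",
		       build_hdr(HDR_TEMPLATE_AES_CBC, 1));
		printf("decrypt hdr: 0x%08x\n",
		       build_hdr(HDR_TEMPLATE_AES_CBC, 0));
		return 0;
	}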
@@ -1368,6 +1626,52 @@ static struct talitos_alg_template driver_algs[] = {
 						     DESC_HDR_MODE1_MDEU_INIT |
 						     DESC_HDR_MODE1_MDEU_PAD |
 						     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+	},
+	/* ABLKCIPHER algorithms. */
+	{
+		.alg = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC,
+	},
+	{
+		.alg = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC |
+				     DESC_HDR_MODE0_DEU_3DES,
 	}
 };
 
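Once registered, the new entries are reached through the generic ablkcipher API like any other provider of "cbc(aes)" or "cbc(des3_ede)". The following is a hedged sketch of a kernel-side caller from the same era, trimmed of the completion-wait plumbing an async driver requires; the key and buffer contents are placeholders, and the function name is hypothetical:

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Sketch only: a real caller would set a completion callback and
	 * wait when the driver returns -EINPROGRESS. */
	static int example_cbc_aes_one_buf(void *buf, unsigned int len, u8 *iv)
	{
		static const u8 key[16];	/* placeholder key */
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		int ret;

		/* "cbc(aes)" resolves to cbc-aes-talitos when the SEC is
		 * present and its priority wins; any provider satisfies it */
		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
		if (ret)
			goto out_free_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, len);
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		/* an async driver like talitos returns -EINPROGRESS here */
		ret = crypto_ablkcipher_encrypt(req);

		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}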