path: root/drivers/crypto
author		Olof Johansson <olof@lixom.net>	2014-01-02 14:45:27 -0500
committer	Olof Johansson <olof@lixom.net>	2014-01-02 14:45:27 -0500
commit		a7dedb4fead89f88255862a9bc46f4c7ec094c2e (patch)
tree		d07b09522364b895dc91ccd2d057668d2fa692c5 /drivers/crypto
parent		9dc9b8e51f95fce0cc3d50ef80402203d2dcd68a (diff)
parent		6075a8b2b6c32ddcb99b85189ae41ab2903e560f (diff)
Merge tag 'davinci-for-v3.14/gpio' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci into next/drivers
From Sekhar Nori:

DaVinci GPIO driver updates
---------------------------

This pull request contains updates to DaVinci GPIO driver and the
resultant platform code changes. The updates include DT-conversion and
changes to make the driver cross-platform ready.

* tag 'davinci-for-v3.14/gpio' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci:
  gpio: davinci: don't create irq_domain in case of unbanked irqs
  gpio: davinci: use chained_irq_enter/chained_irq_exit API
  gpio: davinci: add OF support
  gpio: davinci: remove unused variable intc_irq_num
  gpio: davinci: convert to use irqdomain support.
  gpio: introduce GPIO_DAVINCI kconfig option
  gpio: davinci: get rid of DAVINCI_N_GPIO
  gpio: davinci: use {readl|writel}_relaxed() instead of __raw_*

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/caam/caamalg.c	51
-rw-r--r--	drivers/crypto/caam/jr.c	1
-rw-r--r--	drivers/crypto/talitos.c	68
3 files changed, 69 insertions, 51 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f44b71b9e24..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -818,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
+		       req->cryptlen - ctx->authsize, 1);
 #endif
 
 	if (err) {
@@ -972,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 			 (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -998,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1048,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1066,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
 }
 
 /*
@@ -1130,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1145,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+		dst_nents = sg_count(req->dst,
+				     req->cryptlen +
+					(encrypt ? authsize : (-authsize)),
+				     &dst_chained);
+	} else {
+		src_nents = sg_count(req->src,
+				     req->cryptlen +
+					(encrypt ? authsize : 0),
+				     &src_chained);
+	}
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1234,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1275,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
 
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1332,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+				     &dst_chained);
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1426,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
 				     CAAM_CMD_SZ, &contig);
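
The caamalg.c hunks above all follow one length convention from the crypto AEAD API: req->cryptlen is the plaintext length on encryption and the ciphertext length including the authentication tag on decryption. The driver therefore stops growing req->cryptlen in aead_encrypt()/aead_givencrypt() and instead accounts for ctx->authsize when it builds the job descriptor. A minimal sketch of that arithmetic, for illustration only (aead_in_len() and aead_out_len() are hypothetical helpers, not CAAM driver functions):

#include <stdbool.h>
#include <stddef.h>

/* Bytes the engine reads: associated data + IV + cryptlen as passed in. */
static size_t aead_in_len(size_t assoclen, size_t ivsize, size_t cryptlen)
{
	return assoclen + ivsize + cryptlen;
}

/*
 * Bytes the engine writes: encryption appends the tag (authsize),
 * decryption strips it.
 */
static size_t aead_out_len(size_t cryptlen, size_t authsize, bool encrypt)
{
	return encrypt ? cryptlen + authsize : cryptlen - authsize;
}

The sg_count() length arguments in aead_edesc_alloc() apply the same accounting to each scatterlist, which is why the new "encrypt" parameter is threaded through.
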
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d23356d20e1c..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include "compat.h"
 #include "regs.h"
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 905de4427e7c..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -790,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
 
 	if (edesc->assoc_chained)
 		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
-	else
+	else if (areq->assoclen)
 		/* assoc_nents counts also for IV in non-contiguous cases */
 		dma_unmap_sg(dev, areq->assoc,
 			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -973,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
 	} else {
-		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+		if (areq->assoclen)
+			to_talitos_ptr(&desc->ptr[1],
+				       sg_dma_address(areq->assoc));
+		else
+			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
 		desc->ptr[1].j_extent = 0;
 	}
 
@@ -1108,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 						 unsigned int authsize,
 						 unsigned int ivsize,
 						 int icv_stashing,
-						 u32 cryptoflags)
+						 u32 cryptoflags,
+						 bool encrypt)
 {
 	struct talitos_edesc *edesc;
 	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1122,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (iv)
+	if (ivsize)
 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 
-	if (assoc) {
+	if (assoclen) {
 		/*
 		 * Currently it is assumed that iv is provided whenever assoc
 		 * is.
@@ -1141,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
 	}
 
-	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
-	src_nents = (src_nents == 1) ? 0 : src_nents;
-
-	if (!dst) {
-		dst_nents = 0;
-	} else {
-		if (dst == src) {
-			dst_nents = src_nents;
-		} else {
-			dst_nents = sg_count(dst, cryptlen + authsize,
-					     &dst_chained);
-			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
-		}
+	if (!dst || dst == src) {
+		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = dst ? src_nents : 0;
+	} else { /* dst && dst != src*/
+		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+				     &src_chained);
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+				     &dst_chained);
+		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
 	/*
@@ -1173,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+		if (assoc_chained)
+			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+		else if (assoclen)
+			dma_unmap_sg(dev, assoc,
+				     assoc_nents ? assoc_nents - 1 : 1,
+				     DMA_TO_DEVICE);
+
 		if (iv_dma)
 			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
 		dev_err(dev, "could not allocate edescriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1197,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
-					      int icv_stashing)
+					      int icv_stashing, bool encrypt)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1206,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
 	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
 				   iv, areq->assoclen, areq->cryptlen,
 				   ctx->authsize, ivsize, icv_stashing,
-				   areq->base.flags);
+				   areq->base.flags, encrypt);
 }
 
 static int aead_encrypt(struct aead_request *req)
@@ -1216,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, req->iv, 0);
+	edesc = aead_edesc_alloc(req, req->iv, 0, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1239,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
 	req->cryptlen -= authsize;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, req->iv, 1);
+	edesc = aead_edesc_alloc(req, req->iv, 1, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1285,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(areq, req->giv, 0);
+	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1441,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 }
 
 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
-						    areq)
+						    areq, bool encrypt)
 {
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1449,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
 
 	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
-				   areq->base.flags);
+				   areq->base.flags, encrypt);
 }
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1459,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(areq);
+	edesc = ablkcipher_edesc_alloc(areq, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1476,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(areq);
+	edesc = ablkcipher_edesc_alloc(areq, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1628,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
 	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
-				   nbytes, 0, 0, 0, areq->base.flags);
+				   nbytes, 0, 0, 0, areq->base.flags, false);
 }
 
 static int ahash_init(struct ahash_request *areq)
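
The talitos.c changes apply the same direction-dependent accounting when src and dst are different buffers. Because aead_decrypt() subtracts authsize from req->cryptlen before allocating, cryptlen here is the payload length in both directions, and the source scatterlist carries the ICV only on decryption while the destination must hold it only on encryption. A small sketch of the byte counts handed to sg_count() in that case, for illustration only (aead_sg_lengths() is a hypothetical name, not a talitos function):

#include <stdbool.h>
#include <stddef.h>

static void aead_sg_lengths(size_t cryptlen, size_t authsize, bool encrypt,
			    size_t *src_bytes, size_t *dst_bytes)
{
	/* Source carries the ICV only when it is being checked (decrypt). */
	*src_bytes = cryptlen + (encrypt ? 0 : authsize);
	/* Destination must hold the ICV only when it is generated (encrypt). */
	*dst_bytes = cryptlen + (encrypt ? authsize : 0);
}
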