author     Horia Geanta <horia.geanta@freescale.com>    2013-11-28 08:11:16 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>     2013-11-28 09:25:17 -0500
commit     bbf9c8934ba2bfd5fd809562f945deaf5a565898 (patch)
tree       55103b32778954e8ae681c21ec693d90343521d8 /drivers/crypto
parent     5638cabf3e4883f38dfb246c30980cebf694fbda (diff)
crypto: caam - fix aead sglen for case 'dst != src'
For aead case when source and destination buffers are different, there
is an incorrect assumption that the source length includes the ICV
length. Fix this, since it leads to an oops when using sg_count() to
find the number of nents in the scatterlist:

Unable to handle kernel paging request for data at address 0x00000004
Faulting instruction address: 0xf91f7634
Oops: Kernel access of bad area, sig: 11 [#1]
SMP NR_CPUS=8 P4080 DS
Modules linked in: caamalg(+) caam_jr caam
CPU: 1 PID: 1053 Comm: cryptomgr_test Not tainted 3.11.0 #16
task: eeb24ab0 ti: eeafa000 task.ti: eeafa000
NIP: f91f7634 LR: f91f7f24 CTR: f91f7ef0
REGS: eeafbbc0 TRAP: 0300   Not tainted  (3.11.0)
MSR: 00029002 <CE,EE,ME>  CR: 44044044  XER: 00000000
DEAR: 00000004, ESR: 00000000
GPR00: f91f7f24 eeafbc70 eeb24ab0 00000002 ee8e0900 ee8e0800 00000024 c45c4462
GPR08: 00000010 00000000 00000014 0c0e4000 24044044 00000000 00000000 c0691590
GPR16: eeab0000 eeb23000 00000000 00000000 00000000 00000001 00000001 eeafbcc8
GPR24: 000000d1 00000010 ee2d5000 ee49ea10 ee49ea10 ee46f640 ee46f640 c0691590
NIP [f91f7634] aead_edesc_alloc.constprop.14+0x144/0x780 [caamalg]
LR [f91f7f24] aead_encrypt+0x34/0x288 [caamalg]
Call Trace:
[eeafbc70] [a1004000] 0xa1004000 (unreliable)
[eeafbcc0] [f91f7f24] aead_encrypt+0x34/0x288 [caamalg]
[eeafbcf0] [c020d77c] __test_aead+0x3ec/0xe20
[eeafbe20] [c020f35c] test_aead+0x6c/0xe0
[eeafbe40] [c020f420] alg_test_aead+0x50/0xd0
[eeafbe60] [c020e5e4] alg_test+0x114/0x2e0
[eeafbee0] [c020bd1c] cryptomgr_test+0x4c/0x60
[eeafbef0] [c0047058] kthread+0xa8/0xb0
[eeafbf40] [c000eb0c] ret_from_kernel_thread+0x5c/0x64
Instruction dump:
69084321 7d080034 5508d97e 69080001 0f080000 81290024 552807fe 0f080000
3a600001 5529003a 2f8a0000 40dd0028 <80e90004> 3ab50001 8109000c 70e30002
---[ end trace b3c3e23925c7484e ]---

While here, add a tcrypt mode for making it easy to test authenc
(needed for triggering the case above).

Signed-off-by: Horia Geanta <horia.geanta@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
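[Editor's note] For readers tracing the arithmetic, the sketch below shows
the scatterlist spans the fixed aead_edesc_alloc() has to walk with
sg_count(). It is plain illustrative C, not driver code: the helpers
src_span() and dst_span() do not exist in caamalg.c, and it assumes the
AEAD API convention that req->cryptlen is the plaintext length on encrypt
and the ciphertext length, ICV included, on decrypt.

#include <stdbool.h>
#include <stddef.h>

/* Bytes sg_count() must walk in req->src. */
static size_t src_span(size_t cryptlen, size_t authsize,
                       bool encrypt, bool in_place)
{
        if (in_place)
                /* src == dst: the single buffer is both input and output,
                 * so on encrypt it must also cover the appended ICV. */
                return cryptlen + (encrypt ? authsize : 0);
        return cryptlen;        /* dst != src: input bytes only */
}

/* Bytes sg_count() must walk in req->dst (dst != src case). */
static size_t dst_span(size_t cryptlen, size_t authsize, bool encrypt)
{
        /* Encrypt appends the ICV to the output; decrypt strips it. */
        return encrypt ? cryptlen + authsize : cryptlen - authsize;
}

Before this fix, aead_encrypt() bumped req->cryptlen by ctx->authsize up
front, so in the dst != src case sg_count() was asked to walk authsize
bytes past the end of req->src; that out-of-bounds walk is what the oops
above trips over.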
Diffstat (limited to 'drivers/crypto')
-rw-r--r--    drivers/crypto/caam/caamalg.c    51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f44b71b9e24..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -818,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
+		       req->cryptlen - ctx->authsize, 1);
 #endif
 
 	if (err) {
@@ -972,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 			  (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -998,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1048,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1066,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
 }
 
 /*
@@ -1130,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1145,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+		dst_nents = sg_count(req->dst,
+				     req->cryptlen +
+					(encrypt ? authsize : (-authsize)),
+				     &dst_chained);
+	} else {
+		src_nents = sg_count(req->src,
+				     req->cryptlen +
+					(encrypt ? authsize : 0),
+				     &src_chained);
+	}
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1234,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1275,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
 
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1332,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+				     &dst_chained);
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1426,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
 				     CAAM_CMD_SZ, &contig);
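[Editor's note] The tcrypt mode mentioned in the log is the convenient
trigger for the dst != src path. Where that is unavailable, a sketch along
the following lines exercises the same path through the 3.11-era AEAD API.
It is illustrative only: the algorithm name, sizes, and the helper
try_authenc_dst_ne_src() are assumptions, and a real caller must pass
authenc an rtattr-encoded key rather than the raw placeholder shown here.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative trigger for the dst != src encrypt path (not driver code). */
static int try_authenc_dst_ne_src(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req = NULL;
	struct scatterlist src, dst, asg;
	u8 key[36] = { 0 };	/* placeholder: authenc really wants an
				 * rtattr-encoded auth+enc key blob */
	u8 iv[16] = { 0 };
	u8 assoc[8] = { 0 };
	u8 *in = NULL, *out = NULL;
	int err;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 20);
	if (err)
		goto out_tfm;

	in = kzalloc(16, GFP_KERNEL);		/* plaintext only */
	out = kzalloc(16 + 20, GFP_KERNEL);	/* ciphertext + ICV */
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!in || !out || !req) {
		err = -ENOMEM;
		goto out_free;
	}

	sg_init_one(&asg, assoc, sizeof(assoc));
	sg_init_one(&src, in, 16);		/* src ends at cryptlen */
	sg_init_one(&dst, out, 16 + 20);	/* dst has room for the ICV */

	aead_request_set_assoc(req, &asg, sizeof(assoc));
	/* cryptlen is the plaintext length; the driver, not the caller,
	 * accounts for the ICV when walking src and dst. */
	aead_request_set_crypt(req, &src, &dst, 16, iv);
	err = crypto_aead_encrypt(req);

out_free:
	aead_request_free(req);
	kfree(out);
	kfree(in);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}

A real test would also wait for -EINPROGRESS completion; before the fix,
that crypto_aead_encrypt() call would make aead_edesc_alloc() walk 20
bytes past the end of the 16-byte src scatterlist.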