author     Linus Torvalds <torvalds@linux-foundation.org>   2013-12-04 11:53:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-12-04 11:53:09 -0500
commit     d644c026e6b21959af1f86b15206ae26c64c2e51 (patch)
tree       eadcb52fcdc2bdbf4b7cfe3dd69053f5727c3bc0
parent     1ab231b274ba51a54acebec23c6aded0f3cdf54e (diff)
parent     8ec25c51291681bd68bdc290b35f2e61fa601c21 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This push fixes a number of crashes triggered by a previous crypto
  self-test update.  It also fixes a build problem in the caam driver,
  as well as a concurrency issue in s390.

  Finally there is a pair of fixes to bugs in the crypto scatterwalk
  code and authenc that may lead to crashes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
crypto: testmgr - fix sglen in test_aead for case 'dst != src'
crypto: talitos - fix aead sglen for case 'dst != src'
crypto: caam - fix aead sglen for case 'dst != src'
crypto: ccm - Fix handling of zero plaintext when computing mac
crypto: s390 - Fix aes-xts parameter corruption
crypto: talitos - correctly handle zero-length assoc data
crypto: scatterwalk - Set the chain pointer indication bit
crypto: authenc - Find proper IV address in ablkcipher callback
crypto: caam - Add missing Job Ring include
 arch/s390/crypto/aes_s390.c   | 31
 crypto/authenc.c              |  7
 crypto/ccm.c                  |  3
 crypto/tcrypt.c               |  4
 crypto/testmgr.c              | 26
 drivers/crypto/caam/caamalg.c | 51
 drivers/crypto/caam/jr.c      |  1
 drivers/crypto/talitos.c      | 68
 include/crypto/scatterwalk.h  |  1
 9 files changed, 109 insertions(+), 83 deletions(-)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 4363528dc8fd..b3feabd39f31 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -55,8 +55,7 @@ struct pcc_param {
 
 struct s390_xts_ctx {
         u8 key[32];
-        u8 xts_param[16];
-        struct pcc_param pcc;
+        u8 pcc_key[32];
         long enc;
         long dec;
         int key_len;
@@ -591,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                 xts_ctx->enc = KM_XTS_128_ENCRYPT;
                 xts_ctx->dec = KM_XTS_128_DECRYPT;
                 memcpy(xts_ctx->key + 16, in_key, 16);
-                memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+                memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
                 break;
         case 48:
                 xts_ctx->enc = 0;
@@ -602,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                 xts_ctx->enc = KM_XTS_256_ENCRYPT;
                 xts_ctx->dec = KM_XTS_256_DECRYPT;
                 memcpy(xts_ctx->key, in_key, 32);
-                memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+                memcpy(xts_ctx->pcc_key, in_key + 32, 32);
                 break;
         default:
                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -621,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
         unsigned int nbytes = walk->nbytes;
         unsigned int n;
         u8 *in, *out;
-        void *param;
+        struct pcc_param pcc_param;
+        struct {
+                u8 key[32];
+                u8 init[16];
+        } xts_param;
 
         if (!nbytes)
                 goto out;
 
-        memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
-        memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
-        memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
-        memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
-        param = xts_ctx->pcc.key + offset;
-        ret = crypt_s390_pcc(func, param);
+        memset(pcc_param.block, 0, sizeof(pcc_param.block));
+        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+        memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+        ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
         if (ret < 0)
                 return -EIO;
 
-        memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
-        param = xts_ctx->key + offset;
+        memcpy(xts_param.key, xts_ctx->key, 32);
+        memcpy(xts_param.init, pcc_param.xts, 16);
         do {
                 /* only use complete blocks */
                 n = nbytes & ~(AES_BLOCK_SIZE - 1);
                 out = walk->dst.virt.addr;
                 in = walk->src.virt.addr;
 
-                ret = crypt_s390_km(func, param, out, in, n);
+                ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
                 if (ret < 0 || ret != n)
                         return -EIO;
 
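Why this fixes the s390 concurrency issue: the PCC and XTS parameter blocks used to live inside the shared, per-tfm s390_xts_ctx, so two requests running concurrently on the same tfm would overwrite each other's tweak and scratch key material mid-operation. The patch keeps only long-lived key bytes in the context (pcc_key) and builds both parameter blocks on the stack of xts_aes_crypt(). A minimal user-space sketch of the pattern, with illustrative names and sizes rather than the kernel API:

    /* Sketch: per-tfm scratch state races; stack-local state does not.
     * Illustrative only -- types and names do not match the kernel. */
    #include <string.h>

    struct tfm_ctx {
            unsigned char key[32];     /* long-lived: written once at setkey */
            unsigned char scratch[48]; /* buggy when shared across requests */
    };

    /* Racy: concurrent requests on one tfm clobber ctx->scratch. */
    static void crypt_racy(struct tfm_ctx *ctx, const unsigned char *iv)
    {
            memcpy(ctx->scratch, iv, 16);  /* another request may overwrite */
            /* ... hardware op reads ctx->scratch here ... */
    }

    /* Fixed pattern: per-request scratch lives on the caller's stack. */
    static void crypt_fixed(const struct tfm_ctx *ctx, const unsigned char *iv)
    {
            unsigned char scratch[48];     /* private to this request */

            memcpy(scratch, ctx->key, 32); /* copy long-lived key material */
            memcpy(scratch + 32, iv, 16);  /* per-request tweak */
            /* ... hardware op reads the stack copy ... */
    }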
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 1875e7026e8f..e1223559d5df 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -380,9 +380,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
         if (!err) {
                 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
                 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-                struct ablkcipher_request *abreq = aead_request_ctx(areq);
-                u8 *iv = (u8 *)(abreq + 1) +
-                         crypto_ablkcipher_reqsize(ctx->enc);
+                struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
+                struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+                                                            + ctx->reqoff);
+                u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
 
                 err = crypto_authenc_genicv(areq, iv, 0);
         }
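The authenc bug was pointer arithmetic gone stale: the completion callback assumed the ablkcipher request sat at the very start of the AEAD request context with the IV directly behind it, while the layout actually places the request at areq_ctx->tail + ctx->reqoff with the IV immediately before it. The fix recomputes the address the same way the submission path does. A compilable sketch of that layout and lookup, with illustrative structure names:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative tail layout: [ ... | IV (ivsize) | ablkcipher request ]
     *                                               ^ tail + reqoff      */
    struct fake_areq_ctx {
            char tail[128];
    };

    static uint8_t *iv_for_callback(struct fake_areq_ctx *rctx,
                                    size_t reqoff, size_t ivsize)
    {
            uint8_t *abreq = (uint8_t *)rctx->tail + reqoff; /* request block */

            return abreq - ivsize; /* the IV sits immediately before it */
    }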
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 3e05499d183a..1df84217f7c9 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
         }
 
         /* compute plaintext into mac */
-        get_data_to_compute(cipher, pctx, plain, cryptlen);
+        if (cryptlen)
+                get_data_to_compute(cipher, pctx, plain, cryptlen);
 
 out:
         return err;
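The CCM crash came from handing a zero-length plaintext to the scatterlist walker, which touches its buffer regardless; skipping the pass leaves the MAC covering only the formatted header and associated data, which is exactly right for a pure-AAD message. The same defensive shape in a standalone sketch (all helper names here are hypothetical):

    #include <stddef.h>

    /* Models a walker that touches its buffer before checking the length,
     * the class of bug the one-line guard above avoids. Hypothetical names. */
    static void mac_absorb(unsigned char state[16], const unsigned char *buf,
                           size_t len)
    {
            unsigned char first = buf[0]; /* crashes if buf is empty or NULL */
            size_t i;

            for (i = 0; i < len; i++)
                    state[i % 16] ^= buf[i];
            (void)first;
    }

    static void mac_plaintext(unsigned char state[16],
                              const unsigned char *plain, size_t cryptlen)
    {
            if (cryptlen) /* the fix: skip the zero-length pass entirely */
                    mac_absorb(state, plain, cryptlen);
    }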
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ab8258fcf56..001f07cdb828 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,10 @@ static int do_test(int m)
                 ret += tcrypt_test("cmac(des3_ede)");
                 break;
 
+        case 155:
+                ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
+                break;
+
         case 200:
                 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
                                 speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 432afc03e7c3..77955507f6f1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -503,16 +503,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                         goto out;
                 }
 
-                sg_init_one(&sg[0], input,
-                            template[i].ilen + (enc ? authsize : 0));
-
                 if (diff_dst) {
                         output = xoutbuf[0];
                         output += align_offset;
+                        sg_init_one(&sg[0], input, template[i].ilen);
                         sg_init_one(&sgout[0], output,
+                                    template[i].rlen);
+                } else {
+                        sg_init_one(&sg[0], input,
                                     template[i].ilen +
                                     (enc ? authsize : 0));
-                } else {
                         output = input;
                 }
 
@@ -612,12 +612,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                         memcpy(q, template[i].input + temp,
                                template[i].tap[k]);
 
-                        n = template[i].tap[k];
-                        if (k == template[i].np - 1 && enc)
-                                n += authsize;
-                        if (offset_in_page(q) + n < PAGE_SIZE)
-                                q[n] = 0;
-
                         sg_set_buf(&sg[k], q, template[i].tap[k]);
 
                         if (diff_dst) {
@@ -625,13 +619,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                                           offset_in_page(IDX[k]);
 
                                 memset(q, 0, template[i].tap[k]);
-                                if (offset_in_page(q) + n < PAGE_SIZE)
-                                        q[n] = 0;
 
                                 sg_set_buf(&sgout[k], q,
                                            template[i].tap[k]);
                         }
 
+                        n = template[i].tap[k];
+                        if (k == template[i].np - 1 && enc)
+                                n += authsize;
+                        if (offset_in_page(q) + n < PAGE_SIZE)
+                                q[n] = 0;
+
                         temp += template[i].tap[k];
                 }
 
@@ -650,10 +648,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                         goto out;
                 }
 
-                sg[k - 1].length += authsize;
-
                 if (diff_dst)
                         sgout[k - 1].length += authsize;
+                else
+                        sg[k - 1].length += authsize;
         }
 
         sg_init_table(asg, template[i].anp);
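The testmgr fix is about never over-reading the source: sizing the input sg as ilen + authsize is only valid when the tag is written back into the same buffer (dst == src); with a separate destination, src must be exactly ilen and dst must provide rlen, which already includes the tag. The sizing rule pulled out into a standalone helper (illustrative, mirroring the logic above):

    #include <stddef.h>

    /* Buffer sizing for an AEAD test vector (illustrative helper).
     * in-place:     one sg covers the input plus room for the tag on encrypt;
     * out-of-place: src covers exactly ilen, dst covers rlen. */
    static void aead_sg_lengths(int diff_dst, int enc,
                                size_t ilen, size_t rlen, size_t authsize,
                                size_t *src_len, size_t *dst_len)
    {
            if (diff_dst) {
                    *src_len = ilen; /* never read past the real input */
                    *dst_len = rlen; /* rlen already accounts for the tag */
            } else {
                    *src_len = ilen + (enc ? authsize : 0); /* tag in place */
                    *dst_len = 0;    /* unused for in-place operation */
            }
    }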
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f44b71b9e24..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -818,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                        ivsize, 1);
         print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-                       req->cryptlen, 1);
+                       req->cryptlen - ctx->authsize, 1);
 #endif
 
         if (err) {
@@ -972,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
                                          (edesc->src_nents ? : 1);
                 in_options = LDST_SGF;
         }
-        if (encrypt)
-                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-                                  req->cryptlen - authsize, in_options);
-        else
-                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-                                  req->cryptlen, in_options);
+
+        append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+                          in_options);
 
         if (likely(req->src == req->dst)) {
                 if (all_contig) {
@@ -998,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
                 }
         }
         if (encrypt)
-                append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+                append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+                                   out_options);
         else
                 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
                                    out_options);
@@ -1048,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
                 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
                 in_options = LDST_SGF;
         }
-        append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-                          req->cryptlen - authsize, in_options);
+        append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+                          in_options);
 
         if (contig & GIV_DST_CONTIG) {
                 dst_dma = edesc->iv_dma;
@@ -1066,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
                 }
         }
 
-        append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+        append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+                           out_options);
 }
 
 /*
@@ -1130,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-                                           int desc_bytes, bool *all_contig_ptr)
+                                           int desc_bytes, bool *all_contig_ptr,
+                                           bool encrypt)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1145,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
         bool assoc_chained = false, src_chained = false, dst_chained = false;
         int ivsize = crypto_aead_ivsize(aead);
         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+        unsigned int authsize = ctx->authsize;
 
         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-        src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
-        if (unlikely(req->dst != req->src))
-                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+        if (unlikely(req->dst != req->src)) {
+                src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+                dst_nents = sg_count(req->dst,
+                                     req->cryptlen +
+                                        (encrypt ? authsize : (-authsize)),
+                                     &dst_chained);
+        } else {
+                src_nents = sg_count(req->src,
+                                     req->cryptlen +
+                                        (encrypt ? authsize : 0),
+                                     &src_chained);
+        }
 
         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
                                  DMA_TO_DEVICE, assoc_chained);
@@ -1234,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
         u32 *desc;
         int ret = 0;
 
-        req->cryptlen += ctx->authsize;
-
         /* allocate extended descriptor */
         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                 CAAM_CMD_SZ, &all_contig);
+                                 CAAM_CMD_SZ, &all_contig, true);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1275,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
 
         /* allocate extended descriptor */
         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                 CAAM_CMD_SZ, &all_contig);
+                                 CAAM_CMD_SZ, &all_contig, false);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1332,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
         src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
         if (unlikely(req->dst != req->src))
-                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+                dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+                                     &dst_chained);
 
         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
                                  DMA_TO_DEVICE, assoc_chained);
@@ -1426,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
         u32 *desc;
         int ret = 0;
 
-        req->cryptlen += ctx->authsize;
-
         /* allocate extended descriptor */
         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
                                      CAAM_CMD_SZ, &contig);
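The structural point of the caam change: aead_encrypt() and aead_givencrypt() used to grow req->cryptlen by authsize before building the job, mutating a caller-owned field; the fix threads an encrypt flag down to the allocator and folds the tag into the descriptor sequence lengths instead. The resulting bookkeeping as a standalone helper (illustrative, mirroring init_aead_job() above; caam's convention is that cryptlen includes the tag on decrypt but not on encrypt):

    /* Sequence lengths for an AEAD job (illustrative sketch). */
    static void aead_seq_lengths(int encrypt, unsigned int assoclen,
                                 unsigned int ivsize, unsigned int cryptlen,
                                 unsigned int authsize,
                                 unsigned int *in_len, unsigned int *out_len)
    {
            /* input always covers assoc + IV + text; cryptlen is untouched */
            *in_len = assoclen + ivsize + cryptlen;

            /* encrypt appends the tag; decrypt strips it from the output */
            *out_len = encrypt ? cryptlen + authsize : cryptlen - authsize;
    }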
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d23356d20e1c..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include "compat.h"
 #include "regs.h"
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 905de4427e7c..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -790,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
 
         if (edesc->assoc_chained)
                 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
-        else
+        else if (areq->assoclen)
                 /* assoc_nents counts also for IV in non-contiguous cases */
                 dma_unmap_sg(dev, areq->assoc,
                              edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -973,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                            edesc->dma_len, DMA_BIDIRECTIONAL);
         } else {
-                to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+                if (areq->assoclen)
+                        to_talitos_ptr(&desc->ptr[1],
+                                       sg_dma_address(areq->assoc));
+                else
+                        to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
                 desc->ptr[1].j_extent = 0;
         }
 
@@ -1108,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                                          unsigned int authsize,
                                          unsigned int ivsize,
                                          int icv_stashing,
-                                         u32 cryptoflags)
+                                         u32 cryptoflags,
+                                         bool encrypt)
 {
         struct talitos_edesc *edesc;
         int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1122,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                 return ERR_PTR(-EINVAL);
         }
 
-        if (iv)
+        if (ivsize)
                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 
-        if (assoc) {
+        if (assoclen) {
                 /*
                  * Currently it is assumed that iv is provided whenever assoc
                  * is.
@@ -1141,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
         }
 
-        src_nents = sg_count(src, cryptlen + authsize, &src_chained);
-        src_nents = (src_nents == 1) ? 0 : src_nents;
-
-        if (!dst) {
-                dst_nents = 0;
-        } else {
-                if (dst == src) {
-                        dst_nents = src_nents;
-                } else {
-                        dst_nents = sg_count(dst, cryptlen + authsize,
-                                             &dst_chained);
-                        dst_nents = (dst_nents == 1) ? 0 : dst_nents;
-                }
+        if (!dst || dst == src) {
+                src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+                src_nents = (src_nents == 1) ? 0 : src_nents;
+                dst_nents = dst ? src_nents : 0;
+        } else { /* dst && dst != src*/
+                src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+                                     &src_chained);
+                src_nents = (src_nents == 1) ? 0 : src_nents;
+                dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+                                     &dst_chained);
+                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
         }
 
         /*
@@ -1173,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 
         edesc = kmalloc(alloc_len, GFP_DMA | flags);
         if (!edesc) {
-                talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+                if (assoc_chained)
+                        talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+                else if (assoclen)
+                        dma_unmap_sg(dev, assoc,
+                                     assoc_nents ? assoc_nents - 1 : 1,
+                                     DMA_TO_DEVICE);
+
                 if (iv_dma)
                         dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
                 dev_err(dev, "could not allocate edescriptor\n");
                 return ERR_PTR(-ENOMEM);
         }
@@ -1197,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
-                                              int icv_stashing)
+                                              int icv_stashing, bool encrypt)
 {
         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1206,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
         return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
                                    iv, areq->assoclen, areq->cryptlen,
                                    ctx->authsize, ivsize, icv_stashing,
-                                   areq->base.flags);
+                                   areq->base.flags, encrypt);
 }
 
 static int aead_encrypt(struct aead_request *req)
@@ -1216,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
         struct talitos_edesc *edesc;
 
         /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, req->iv, 0);
+        edesc = aead_edesc_alloc(req, req->iv, 0, true);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1239,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
         req->cryptlen -= authsize;
 
         /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, req->iv, 1);
+        edesc = aead_edesc_alloc(req, req->iv, 1, false);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1285,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
         struct talitos_edesc *edesc;
 
         /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(areq, req->giv, 0);
+        edesc = aead_edesc_alloc(areq, req->giv, 0, true);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1441,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 }
 
 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
-                                                    areq)
+                                                    areq, bool encrypt)
 {
         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1449,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
 
         return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
-                                   areq->base.flags);
+                                   areq->base.flags, encrypt);
 }
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1459,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
         struct talitos_edesc *edesc;
 
         /* allocate extended descriptor */
-        edesc = ablkcipher_edesc_alloc(areq);
+        edesc = ablkcipher_edesc_alloc(areq, true);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1476,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
         struct talitos_edesc *edesc;
 
         /* allocate extended descriptor */
-        edesc = ablkcipher_edesc_alloc(areq);
+        edesc = ablkcipher_edesc_alloc(areq, false);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
@@ -1628,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
         return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
-                                   nbytes, 0, 0, 0, areq->base.flags);
+                                   nbytes, 0, 0, 0, areq->base.flags, false);
 }
 
 static int ahash_init(struct ahash_request *areq)
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc8cf4c..64ebede184f1 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
 {
         sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
         sg1[num - 1].page_link &= ~0x02;
+        sg1[num - 1].page_link |= 0x01;
 }
 
 static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
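Background on the one-line scatterwalk fix: in struct scatterlist, the low two bits of page_link are flags; bit 0 (0x01) marks the entry as a chain pointer to another sg list and bit 1 (0x02) marks the end of the list. scatterwalk_sg_chain() stored the pointer and cleared the end bit but never set the chain bit, so sg_is_chain()-style checks treated the entry as ordinary data and walkers fell off the list. A minimal user-space model of the flag logic (simplified, not the kernel's actual definitions):

    #include <stdint.h>

    #define SG_FLAG_CHAIN 0x01UL /* entry points at the next sg table */
    #define SG_FLAG_END   0x02UL /* entry terminates the list */

    struct sg_entry {
            uintptr_t page_link; /* pointer with flags in the low two bits */
    };

    static void sg_chain_to(struct sg_entry *sg1, int num,
                            struct sg_entry *sg2)
    {
            sg1[num - 1].page_link = (uintptr_t)sg2; /* store chain target */
            sg1[num - 1].page_link &= ~SG_FLAG_END;  /* chained entry is not the end */
            sg1[num - 1].page_link |= SG_FLAG_CHAIN; /* the bit this patch adds */
    }

    static int sg_entry_is_chain(const struct sg_entry *sg)
    {
            return sg->page_link & SG_FLAG_CHAIN;
    }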