author	Horia Geantă <horia.geanta@nxp.com>	2017-02-10 07:07:22 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-02-15 00:23:41 -0500
commit	bbf2234494afd14a720d61a233c21b95e4261326 (patch)
tree	acff483ada06a7a1602dd7070aa31fa750951a33
parent	cfb725f6d3d31355fa4510da7d7bdce807045b42 (diff)
crypto: caam - fix DMA API leaks for multiple setkey() calls
The setkey() callback may be invoked multiple times for the same tfm. In
this case, DMA API leaks are caused by the shared descriptors (and the
key, for caamalg) being mapped several times and unmapped only once.

Fix this by performing the mapping / unmapping only in the crypto
algorithm's cra_init() / cra_exit() callbacks, and by syncing for device
(dma_sync_single_for_device()) in the setkey() tfm callback.

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	drivers/crypto/caam/caamalg.c	275
-rw-r--r--	drivers/crypto/caam/caamhash.c	79
2 files changed, 102 insertions(+), 252 deletions(-)
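In outline, the patch replaces the per-setkey() dma_map_single() calls
with one long-lived mapping per tfm. Below is a minimal sketch of that
lifecycle, using the same DMA API calls the patch introduces; the
context layout and the names (my_ctx, my_init, my_setkey, my_exit) are
illustrative stand-ins, not the driver's actual code:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_ctx {
	u32 sh_desc[64];	/* built by CPU, read by device */
	u8 key[128];		/* built by CPU, read by device */
	dma_addr_t sh_desc_dma;	/* CPU-only bookkeeping from here on */
	dma_addr_t key_dma;
	struct device *dev;
};

/* cra_init(): map the device-visible region exactly once */
static int my_init(struct my_ctx *ctx, struct device *dev)
{
	dma_addr_t dma_addr;

	ctx->dev = dev;
	dma_addr = dma_map_single_attrs(dev, ctx->sh_desc,
					offsetof(struct my_ctx, sh_desc_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;
	ctx->sh_desc_dma = dma_addr;
	ctx->key_dma = dma_addr + offsetof(struct my_ctx, key);
	return 0;
}

/* setkey(): may run many times; rewrite and sync, never remap */
static int my_setkey(struct my_ctx *ctx, const u8 *key, unsigned int keylen)
{
	if (keylen > sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(ctx->dev, ctx->key_dma, keylen,
				   DMA_TO_DEVICE);
	return 0;
}

/* cra_exit(): unmap exactly once */
static void my_exit(struct my_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->sh_desc_dma,
			       offsetof(struct my_ctx, sh_desc_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

Two details make the single mapping work. First, the device-read
buffers are kept contiguous at the start of the context struct (which
is why the patch reorders struct caam_ctx), so one mapping of size
offsetof(..., sh_desc_dma) covers all of them. Second,
DMA_ATTR_SKIP_CPU_SYNC tells map/unmap not to perform the implicit CPU
cache synchronization; ownership is instead handed to the device
explicitly with dma_sync_single_for_device() each time the CPU rewrites
a descriptor or key.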
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 71d09e896d48..9bc80eb06934 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -134,15 +134,15 @@ struct caam_aead_alg {
  * per-session context
  */
 struct caam_ctx {
-	struct device *jrdev;
 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t sh_desc_enc_dma;
 	dma_addr_t sh_desc_dec_dma;
 	dma_addr_t sh_desc_givenc_dma;
-	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t key_dma;
+	struct device *jrdev;
 	struct alginfo adata;
 	struct alginfo cdata;
 	unsigned int authsize;
@@ -171,13 +171,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/*
 	 * Job Descriptor and Shared Descriptors
@@ -194,13 +189,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -278,13 +268,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
 			       is_rfc3686, nonce, ctx1_iv_off);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 skip_enc:
 	/*
@@ -315,13 +300,8 @@ skip_enc:
 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
 			       ctx->authsize, alg->caam.geniv, is_rfc3686,
 			       nonce, ctx1_iv_off);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	if (!alg->caam.geniv)
 		goto skip_givenc;
@@ -354,13 +334,8 @@ skip_enc:
 	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
 				  ctx->authsize, is_rfc3686, nonce,
 				  ctx1_iv_off);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 skip_givenc:
 	return 0;
@@ -403,13 +378,8 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/*
 	 * Job Descriptor and Shared Descriptors
@@ -425,13 +395,8 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -472,13 +437,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/*
 	 * Job Descriptor and Shared Descriptors
@@ -494,13 +454,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -542,13 +497,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/*
 	 * Job Descriptor and Shared Descriptors
@@ -564,13 +514,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -614,28 +559,15 @@ static int aead_setkey(struct crypto_aead *aead,
 
 	/* postpend encryption key to auth split key */
 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
-				      keys.enckeylen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, DMA_TO_DEVICE);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 		       ctx->adata.keylen_pad + keys.enckeylen, 1);
 #endif
-
 	ctx->cdata.keylen = keys.enckeylen;
-
-	ret = aead_set_sh_desc(aead);
-	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-				 keys.enckeylen, DMA_TO_DEVICE);
-	}
-
-	return ret;
+	return aead_set_sh_desc(aead);
 badkey:
 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
@@ -646,7 +578,6 @@ static int gcm_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	int ret = 0;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -654,21 +585,10 @@ static int gcm_setkey(struct crypto_aead *aead,
 #endif
 
 	memcpy(ctx->key, key, keylen);
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
-				      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
 	ctx->cdata.keylen = keylen;
 
-	ret = gcm_set_sh_desc(aead);
-	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
-				 DMA_TO_DEVICE);
-	}
-
-	return ret;
+	return gcm_set_sh_desc(aead);
 }
 
 static int rfc4106_setkey(struct crypto_aead *aead,
@@ -676,7 +596,6 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	int ret = 0;
 
 	if (keylen < 4)
 		return -EINVAL;
@@ -693,21 +612,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 	 * in the nonce. Update the AES key length.
 	 */
 	ctx->cdata.keylen = keylen - 4;
-
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
-				      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
-
-	ret = rfc4106_set_sh_desc(aead);
-	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
-				 DMA_TO_DEVICE);
-	}
-
-	return ret;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   DMA_TO_DEVICE);
+	return rfc4106_set_sh_desc(aead);
 }
 
 static int rfc4543_setkey(struct crypto_aead *aead,
@@ -715,7 +622,6 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	int ret = 0;
 
 	if (keylen < 4)
 		return -EINVAL;
@@ -732,21 +638,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 	 * in the nonce. Update the AES key length.
 	 */
 	ctx->cdata.keylen = keylen - 4;
-
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
-				      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
-
-	ret = rfc4543_set_sh_desc(aead);
-	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
-				 DMA_TO_DEVICE);
-	}
-
-	return ret;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   DMA_TO_DEVICE);
+	return rfc4543_set_sh_desc(aead);
 }
 
 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -787,12 +681,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		keylen -= CTR_RFC3686_NONCE_SIZE;
 	}
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
-				      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = ctx->key;
 	ctx->cdata.key_inline = true;
@@ -801,37 +690,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
 				     ctx1_iv_off);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/* ablkcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
 				     ctx1_iv_off);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/* ablkcipher_givencrypt shared descriptor */
 	desc = ctx->sh_desc_givenc;
 	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
 					ctx1_iv_off);
-	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-						 desc_bytes(desc),
-						 DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -851,11 +725,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	}
 
 	memcpy(ctx->key, key, keylen);
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = ctx->key;
 	ctx->cdata.key_inline = true;
@@ -863,24 +733,14 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	/* xts_ablkcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	/* xts_ablkcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
-				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -3391,12 +3251,31 @@ struct caam_crypto_alg {
 
 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
 {
+	dma_addr_t dma_addr;
+
 	ctx->jrdev = caam_jr_alloc();
 	if (IS_ERR(ctx->jrdev)) {
 		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
 	}
 
+	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+					offsetof(struct caam_ctx,
+						 sh_desc_enc_dma),
+					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	ctx->sh_desc_enc_dma = dma_addr;
+	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
+						   sh_desc_dec);
+	ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
+						      sh_desc_givenc);
+	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+
 	/* copy descriptor header template value */
 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
@@ -3426,25 +3305,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
 
 static void caam_exit_common(struct caam_ctx *ctx)
 {
-	if (ctx->sh_desc_enc_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
-				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
-	if (ctx->sh_desc_dec_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
-				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
-	if (ctx->sh_desc_givenc_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
-				 desc_bytes(ctx->sh_desc_givenc),
-				 DMA_TO_DEVICE);
-	if (ctx->key_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
-		dma_unmap_single(ctx->jrdev, ctx->key_dma,
-				 ctx->cdata.keylen + ctx->adata.keylen_pad,
-				 DMA_TO_DEVICE);
-
+	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+			       offsetof(struct caam_ctx, sh_desc_enc_dma),
+			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 	caam_jr_free(ctx->jrdev);
 }
 
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 117bbd8c08d4..2ad83a8dc0fe 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -276,12 +276,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_update shared descriptor */
 	desc = ctx->sh_desc_update;
 	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
-	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-						 DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
 		       "ahash update shdesc@"__stringify(__LINE__)": ",
@@ -291,13 +287,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
 	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
-	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
-						       desc_bytes(desc),
-						       DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
 		       "ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -307,12 +298,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
 	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
-	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -322,13 +309,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
 	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
-	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
-						 desc_bytes(desc),
-						 DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
 		       "ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -1716,6 +1698,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
 					 HASH_MSG_LEN + 64,
 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+	dma_addr_t dma_addr;
 
 	/*
 	 * Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,6 +1709,26 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(ctx->jrdev);
 	}
+
+	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+					offsetof(struct caam_hash_ctx,
+						 sh_desc_update_dma),
+					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	ctx->sh_desc_update_dma = dma_addr;
+	ctx->sh_desc_update_first_dma = dma_addr +
+					offsetof(struct caam_hash_ctx,
+						 sh_desc_update_first);
+	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
+						   sh_desc_fin);
+	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
+						      sh_desc_digest);
+
 	/* copy descriptor header template value */
 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
 
@@ -1742,26 +1745,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 {
 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (ctx->sh_desc_update_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
-				 desc_bytes(ctx->sh_desc_update),
-				 DMA_TO_DEVICE);
-	if (ctx->sh_desc_update_first_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
-				 desc_bytes(ctx->sh_desc_update_first),
-				 DMA_TO_DEVICE);
-	if (ctx->sh_desc_fin_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
-				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
-	if (ctx->sh_desc_digest_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
-				 desc_bytes(ctx->sh_desc_digest),
-				 DMA_TO_DEVICE);
-
+	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+			       offsetof(struct caam_hash_ctx,
+					sh_desc_update_dma),
+			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 	caam_jr_free(ctx->jrdev);
 }
 