Diffstat (limited to 'arch/s390/crypto/aes_s390.c')
 arch/s390/crypto/aes_s390.c | 383 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 379 insertions(+), 4 deletions(-)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 58f46734465f..a9ce135893f8 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -31,7 +31,8 @@
 #define AES_KEYLEN_192	2
 #define AES_KEYLEN_256	4
 
-static char keylen_flag = 0;
+static u8 *ctrblk;
+static char keylen_flag;
 
 struct s390_aes_ctx {
 	u8 iv[AES_BLOCK_SIZE];
@@ -45,6 +46,24 @@ struct s390_aes_ctx {
 	} fallback;
 };
 
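+/*
+ * Parameter block for the PCC ("perform cryptographic computation")
+ * instruction: AES key, tweak, two work fields, and the computed XTS
+ * parameter, laid out in the order the instruction expects.
+ */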
+struct pcc_param {
+	u8 key[32];
+	u8 tweak[16];
+	u8 block[16];
+	u8 bit[16];
+	u8 xts[16];
+};
+
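+/* Per-tfm context for the hardware XTS path, with a software fallback tfm. */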
+struct s390_xts_ctx {
+	u8 key[32];
+	u8 xts_param[16];
+	struct pcc_param pcc;
+	long enc;
+	long dec;
+	int key_len;
+	struct crypto_blkcipher *fallback;
+};
+
 /*
  * Check if the key_len is supported by the HW.
  * Returns 0 if it is, a positive number if it is not and software fallback is
@@ -504,15 +523,337 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
 
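+/*
+ * Key the software fallback: mirror the request flags into the fallback tfm
+ * and copy any result flags back to the caller's tfm on failure.
+ */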
+static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
+			       unsigned int len)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+			CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
+				CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int xts_fallback_decrypt(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_blkcipher *tfm;
+	unsigned int ret;
+
+	tfm = desc->tfm;
+	desc->tfm = xts_ctx->fallback;
+
+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
+static int xts_fallback_encrypt(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_blkcipher *tfm;
+	unsigned int ret;
+
+	tfm = desc->tfm;
+	desc->tfm = xts_ctx->fallback;
+
+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
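+/*
+ * An XTS key is two AES keys: the first half keys the KM cipher, the second
+ * half keys the PCC tweak computation. 48-byte keys (XTS-AES-192) have no
+ * hardware function code and are handled by the fallback.
+ */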
+static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+
+	switch (key_len) {
+	case 32:
+		xts_ctx->enc = KM_XTS_128_ENCRYPT;
+		xts_ctx->dec = KM_XTS_128_DECRYPT;
+		memcpy(xts_ctx->key + 16, in_key, 16);
+		memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+		break;
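+	/* XTS-AES-192 has no hardware function code; use the fallback */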
+	case 48:
+		xts_ctx->enc = 0;
+		xts_ctx->dec = 0;
+		xts_fallback_setkey(tfm, in_key, key_len);
+		break;
+	case 64:
+		xts_ctx->enc = KM_XTS_256_ENCRYPT;
+		xts_ctx->dec = KM_XTS_256_DECRYPT;
+		memcpy(xts_ctx->key, in_key, 32);
+		memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+		break;
+	default:
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	xts_ctx->key_len = key_len;
+	return 0;
+}
+
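+/*
+ * One PCC call turns the IV into the initial XTS parameter; the KM parameter
+ * block (key immediately followed by xts_param) then carries it while whole
+ * blocks are ciphered. The offset selects the right-aligned 16-byte key copy
+ * for XTS-AES-128.
+ */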
+static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+			 struct s390_xts_ctx *xts_ctx,
+			 struct blkcipher_walk *walk)
+{
+	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes = walk->nbytes;
+	unsigned int n;
+	u8 *in, *out;
+	void *param;
+
+	if (!nbytes)
+		goto out;
+
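+	/* zero the scratch fields, load the tweak, and run PCC */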
+	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+	param = xts_ctx->pcc.key + offset;
+	ret = crypt_s390_pcc(func, param);
+	BUG_ON(ret < 0);
+
+	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+	param = xts_ctx->key + offset;
+	do {
+		/* only use complete blocks */
+		n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		out = walk->dst.virt.addr;
+		in = walk->src.virt.addr;
+
+		ret = crypt_s390_km(func, param, out, in, n);
+		BUG_ON(ret < 0 || ret != n);
+
+		nbytes &= AES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	} while ((nbytes = walk->nbytes));
+out:
+	return ret;
+}
+
+static int xts_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	if (unlikely(xts_ctx->key_len == 48))
+		return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+}
+
+static int xts_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	if (unlikely(xts_ctx->key_len == 48))
+		return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+}
+
+static int xts_fallback_init(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(xts_ctx->fallback)) {
+		pr_err("Allocating XTS fallback algorithm %s failed\n",
+		       name);
+		return PTR_ERR(xts_ctx->fallback);
+	}
+	return 0;
+}
+
+static void xts_fallback_exit(struct crypto_tfm *tfm)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(xts_ctx->fallback);
+	xts_ctx->fallback = NULL;
+}
+
+static struct crypto_alg xts_aes_alg = {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "xts-aes-s390",
+	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
+				  CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(xts_aes_alg.cra_list),
+	.cra_init		= xts_fallback_init,
+	.cra_exit		= xts_fallback_exit,
+	.cra_u			= {
+		.blkcipher = {
+			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= xts_aes_set_key,
+			.encrypt	= xts_aes_encrypt,
+			.decrypt	= xts_aes_decrypt,
+		}
+	}
+};
+
+static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	switch (key_len) {
+	case 16:
+		sctx->enc = KMCTR_AES_128_ENCRYPT;
+		sctx->dec = KMCTR_AES_128_DECRYPT;
+		break;
+	case 24:
+		sctx->enc = KMCTR_AES_192_ENCRYPT;
+		sctx->dec = KMCTR_AES_192_DECRYPT;
+		break;
+	case 32:
+		sctx->enc = KMCTR_AES_256_ENCRYPT;
+		sctx->dec = KMCTR_AES_256_DECRYPT;
+		break;
+	}
+
+	return aes_set_key(tfm, in_key, key_len);
+}
+
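+/*
+ * Expand successive counter values into the shared ctrblk page so a single
+ * KMCTR call can cipher up to PAGE_SIZE bytes.
+ */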
+static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+{
+	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+	unsigned int i, n, nbytes;
+	u8 buf[AES_BLOCK_SIZE];
+	u8 *out, *in;
+
+	if (!walk->nbytes)
+		return ret;
+
+	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+		out = walk->dst.virt.addr;
+		in = walk->src.virt.addr;
+		while (nbytes >= AES_BLOCK_SIZE) {
+			/* only use complete blocks, max. PAGE_SIZE */
+			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+						   nbytes & ~(AES_BLOCK_SIZE - 1);
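+			/* fill the page: each counter block is the previous one + 1 */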
+			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+				       AES_BLOCK_SIZE);
+				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+			}
+			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+			BUG_ON(ret < 0 || ret != n);
+			if (n > AES_BLOCK_SIZE)
+				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				       AES_BLOCK_SIZE);
+			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			out += n;
+			in += n;
+			nbytes -= n;
+		}
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	}
+	/*
+	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+	 */
+	if (nbytes) {
+		out = walk->dst.virt.addr;
+		in = walk->src.virt.addr;
+		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+				       AES_BLOCK_SIZE, ctrblk);
+		BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+		memcpy(out, buf, nbytes);
+		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		ret = blkcipher_walk_done(desc, walk, 0);
+	}
+	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+	return ret;
+}
+
+static int ctr_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+}
+
+static int ctr_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+}
+
+static struct crypto_alg ctr_aes_alg = {
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "ctr-aes-s390",
+	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ctr_aes_alg.cra_list),
+	.cra_u			= {
+		.blkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= ctr_aes_set_key,
+			.encrypt	= ctr_aes_encrypt,
+			.decrypt	= ctr_aes_decrypt,
+		}
+	}
+};
+
 static int __init aes_s390_init(void)
 {
 	int ret;
 
-	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
+	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
 		keylen_flag |= AES_KEYLEN_128;
-	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
+	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
 		keylen_flag |= AES_KEYLEN_192;
-	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
+	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
 		keylen_flag |= AES_KEYLEN_256;
 
 	if (!keylen_flag)
@@ -535,9 +876,40 @@ static int __init aes_s390_init(void)
 	if (ret)
 		goto cbc_aes_err;
 
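+	/* the KM-XTS function codes come with MSA extension 4 */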
+	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+		ret = crypto_register_alg(&xts_aes_alg);
+		if (ret)
+			goto xts_aes_err;
+	}
+
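+	/* CTR additionally needs the ctrblk page for pre-computed counters */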
+	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+		if (!ctrblk) {
+			ret = -ENOMEM;
+			goto ctr_aes_err;
+		}
+		ret = crypto_register_alg(&ctr_aes_alg);
+		if (ret) {
+			free_page((unsigned long) ctrblk);
+			goto ctr_aes_err;
+		}
+	}
+
 out:
 	return ret;
 
+ctr_aes_err:
+	crypto_unregister_alg(&xts_aes_alg);
+xts_aes_err:
+	crypto_unregister_alg(&cbc_aes_alg);
 cbc_aes_err:
 	crypto_unregister_alg(&ecb_aes_alg);
 ecb_aes_err:
@@ -548,6 +920,9 @@ aes_err:
 
 static void __exit aes_s390_fini(void)
 {
+	crypto_unregister_alg(&ctr_aes_alg);
+	free_page((unsigned long) ctrblk);
+	crypto_unregister_alg(&xts_aes_alg);
 	crypto_unregister_alg(&cbc_aes_alg);
 	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);