Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
 -rw-r--r--   arch/x86/crypto/aesni-intel_glue.c   557 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 550 insertions(+), 7 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2cb3dcc4490a..feee8ff1d05e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -5,6 +5,14 @@
  * Copyright (C) 2008, Intel Corp.
  * Author: Huang Ying <ying.huang@intel.com>
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ * Authors: Adrian Hoban <adrian.hoban@intel.com>
+ *          Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *          Tadeusz Struk (tadeusz.struk@intel.com)
+ *          Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -21,6 +29,10 @@
 #include <crypto/ctr.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
 #define HAS_CTR
@@ -42,8 +54,31 @@ struct async_aes_ctx {
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
 
-#define AESNI_ALIGN	16
+/* This data is stored at the end of the crypto_tfm struct.
+ * It's a type of per "session" data storage location.
+ * This needs to be 16 byte aligned.
+ */
+struct aesni_rfc4106_gcm_ctx {
+	u8 hash_subkey[16];
+	struct crypto_aes_ctx aes_key_expanded;
+	u8 nonce[4];
+	struct cryptd_aead *cryptd_tfm;
+};
+
+struct aesni_gcm_set_hash_subkey_result {
+	int err;
+	struct completion completion;
+};
+
+struct aesni_hash_subkey_req_data {
+	u8 iv[16];
+	struct aesni_gcm_set_hash_subkey_result result;
+	struct scatterlist sg;
+};
+
+#define AESNI_ALIGN	(16)
 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
+#define RFC4106_HASH_SUBKEY_SIZE	16
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
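
The context layout above deserves a note: the crypto core allocates cra_ctxsize bytes of per-tfm context with no 16-byte alignment guarantee, while the AES-NI routines want their state on a 16-byte boundary. The patch therefore over-allocates by AESNI_ALIGN and re-derives an aligned pointer on every access. A minimal standalone sketch of the idea (hypothetical names; the driver itself uses crypto_tfm_ctx() and PTR_ALIGN()):

	#include <stdint.h>

	#define EXAMPLE_ALIGN 16

	/* Round p up to the next EXAMPLE_ALIGN-byte boundary, mirroring
	 * what PTR_ALIGN(p, AESNI_ALIGN) does in the kernel. */
	static void *example_ptr_align(void *p)
	{
		uintptr_t a = (uintptr_t)p;

		a = (a + EXAMPLE_ALIGN - 1) & ~(uintptr_t)(EXAMPLE_ALIGN - 1);
		return (void *)a;
	}

	/* Reserving sizeof(struct aesni_rfc4106_gcm_ctx) + EXAMPLE_ALIGN
	 * bytes guarantees that an aligned copy of the context fits no
	 * matter where the base allocation lands, which is why the
	 * cra_ctxsize values later in the patch add AESNI_ALIGN. */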
@@ -59,9 +94,66 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
+
+int crypto_fpu_init(void);
+void crypto_fpu_exit(void);
+
+#ifdef CONFIG_X86_64
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
+/* asmlinkage void aesni_gcm_enc()
+ * void *ctx, AES key schedule. Starts on a 16-byte boundary.
+ * u8 *out, Ciphertext output. Encrypting in place is allowed.
+ * const u8 *in, Plaintext input.
+ * unsigned long plaintext_len, Length of data in bytes for encryption.
+ * u8 *iv, Pre-counter block j0: 4-byte salt (from the Security Association)
+ *         concatenated with the 8-byte Initialisation Vector (from the IPsec
+ *         ESP payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the hash subkey input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD).
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is
+ *         going to be 8 or 12 bytes.
+ * u8 *auth_tag, Authentication tag output.
+ * unsigned long auth_tag_len, Authentication tag length in bytes.
+ *         Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
+			const u8 *in, unsigned long plaintext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+/* asmlinkage void aesni_gcm_dec()
+ * void *ctx, AES key schedule. Starts on a 16-byte boundary.
+ * u8 *out, Plaintext output. Decrypting in place is allowed.
+ * const u8 *in, Ciphertext input.
+ * unsigned long ciphertext_len, Length of data in bytes for decryption.
+ * u8 *iv, Pre-counter block j0: 4-byte salt (from the Security Association)
+ *         concatenated with the 8-byte Initialisation Vector (from the IPsec
+ *         ESP payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the hash subkey input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD).
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is
+ *         going to be 8 or 12 bytes.
+ * u8 *auth_tag, Authentication tag output.
+ * unsigned long auth_tag_len, Authentication tag length in bytes.
+ *         Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
+			const u8 *in, unsigned long ciphertext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+static inline struct
+aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
+{
+	return (struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)
+			  crypto_tfm_ctx(crypto_aead_tfm(tfm)),
+			  AESNI_ALIGN);
+}
+#endif
+
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
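
One point worth calling out in the aesni_gcm_enc()/aesni_gcm_dec() documentation above: the iv argument is not the 8-byte explicit IV carried in the ESP payload but the full 16-byte pre-counter block j0. A hedged sketch of that layout, matching the construction done later in __driver_rfc4106_encrypt() and __driver_rfc4106_decrypt() (the helper name is hypothetical):

	#include <stdint.h>
	#include <string.h>

	/* j0 = 4-byte salt (the nonce stored with the key) ||
	 *      8-byte explicit IV (from the request)       ||
	 *      32-bit big-endian block counter fixed at 1  */
	static void example_build_j0(uint8_t j0[16], const uint8_t salt[4],
				     const uint8_t explicit_iv[8])
	{
		memcpy(j0, salt, 4);
		memcpy(j0 + 4, explicit_iv, 8);
		j0[12] = 0x00;
		j0[13] = 0x00;
		j0[14] = 0x00;
		j0[15] = 0x01;	/* cpu_to_be32(1) in the driver */
	}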
@@ -324,6 +416,7 @@ static struct crypto_alg blk_cbc_alg = {
 	},
 };
 
+#ifdef CONFIG_X86_64
 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 			    struct blkcipher_walk *walk)
 {
@@ -389,6 +482,7 @@ static struct crypto_alg blk_ctr_alg = {
 		},
 	},
 };
+#endif
 
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
@@ -536,6 +630,7 @@ static struct crypto_alg ablk_cbc_alg = {
 	},
 };
 
+#ifdef CONFIG_X86_64
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
@@ -612,6 +707,7 @@ static struct crypto_alg ablk_rfc3686_ctr_alg = {
 	},
 };
 #endif
+#endif
 
 #ifdef HAS_LRW
 static int ablk_lrw_init(struct crypto_tfm *tfm)
@@ -730,6 +826,432 @@ static struct crypto_alg ablk_xts_alg = {
 };
 #endif
 
+#ifdef CONFIG_X86_64
+static int rfc4106_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead *cryptd_tfm;
+	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	struct crypto_aead *cryptd_child;
+	struct aesni_rfc4106_gcm_ctx *child_ctx;
+	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+
+	cryptd_child = cryptd_aead_child(cryptd_tfm);
+	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
+	memcpy(child_ctx, ctx, sizeof(*ctx));
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_aead.reqsize = sizeof(struct aead_request)
+		+ crypto_aead_reqsize(&cryptd_tfm->base);
+	return 0;
+}
+
+static void rfc4106_exit(struct crypto_tfm *tfm)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx =
+		(struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	if (!IS_ERR(ctx->cryptd_tfm))
+		cryptd_free_aead(ctx->cryptd_tfm);
+	return;
+}
+
+static void
+rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
+{
+	struct aesni_gcm_set_hash_subkey_result *result = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	result->err = err;
+	complete(&result->completion);
+}
+
+static int
+rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
+{
+	struct crypto_ablkcipher *ctr_tfm;
+	struct ablkcipher_request *req;
+	int ret = -EINVAL;
+	struct aesni_hash_subkey_req_data *req_data;
+
+	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
+	if (IS_ERR(ctr_tfm))
+		return PTR_ERR(ctr_tfm);
+
+	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
+
+	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
+	if (ret)
+		goto out_free_ablkcipher;
+
+	ret = -ENOMEM;
+	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
+	if (!req)
+		goto out_free_ablkcipher;
+
+	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
+	if (!req_data)
+		goto out_free_request;
+
+	memset(req_data->iv, 0, sizeof(req_data->iv));
+
+	/* Clear the data in the hash sub key container to zero.
+	 * We want to cipher all zeros to create the hash sub key. */
+	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
+
+	init_completion(&req_data->result.completion);
+	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
+	ablkcipher_request_set_tfm(req, ctr_tfm);
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+					CRYPTO_TFM_REQ_MAY_BACKLOG,
+					rfc4106_set_hash_subkey_done,
+					&req_data->result);
+
+	ablkcipher_request_set_crypt(req, &req_data->sg,
+		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
+
+	ret = crypto_ablkcipher_encrypt(req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible
+			(&req_data->result.completion);
+		if (!ret)
+			ret = req_data->result.err;
+	}
+	kfree(req_data);
+out_free_request:
+	ablkcipher_request_free(req);
+out_free_ablkcipher:
+	crypto_free_ablkcipher(ctr_tfm);
+	return ret;
+}
+
+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
+			   unsigned int key_len)
+{
+	int ret = 0;
+	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+	struct aesni_rfc4106_gcm_ctx *child_ctx =
+		aesni_rfc4106_gcm_ctx_get(cryptd_child);
+	u8 *new_key_mem = NULL;
+
+	if (key_len < 4) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	/* Account for the 4-byte nonce at the end. */
+	key_len -= 4;
+	if (key_len != AES_KEYSIZE_128) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
+	/* This must be on a 16-byte boundary! */
+	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
+		return -EINVAL;
+
+	if ((unsigned long)key % AESNI_ALIGN) {
+		/* The key is not aligned: use an auxiliary aligned pointer. */
+		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
+		if (!new_key_mem)
+			return -ENOMEM;
+
+		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
+		memcpy(new_key_mem, key, key_len);
+		key = new_key_mem;
+	}
+
+	if (!irq_fpu_usable())
+		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
+					    key, key_len);
+	else {
+		kernel_fpu_begin();
+		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
+		kernel_fpu_end();
+	}
+	/* This must be on a 16-byte boundary! */
+	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+	memcpy(child_ctx, ctx, sizeof(*ctx));
+exit:
+	kfree(new_key_mem);
+	return ret;
+}
+
+/* This is the Integrity Check Value (aka the authentication tag) length
+ * and can be 8, 12 or 16 bytes long. */
+static int rfc4106_set_authsize(struct crypto_aead *parent,
+				unsigned int authsize)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	crypto_aead_crt(parent)->authsize = authsize;
+	crypto_aead_crt(cryptd_child)->authsize = authsize;
+	return 0;
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_encrypt(cryptd_req);
+	} else {
+		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.encrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_decrypt(cryptd_req);
+	} else {
+		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.decrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static struct crypto_alg rfc4106_alg = {
+	.cra_name = "rfc4106(gcm(aes))",
+	.cra_driver_name = "rfc4106-gcm-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_nivaead_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
+	.cra_init = rfc4106_init,
+	.cra_exit = rfc4106_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = rfc4106_set_key,
+			.setauthsize = rfc4106_set_authsize,
+			.encrypt = rfc4106_encrypt,
+			.decrypt = rfc4106_decrypt,
+			.geniv = "seqiv",
+			.ivsize = 8,
+			.maxauthsize = 16,
+		},
+	},
+};
+
+static int __driver_rfc4106_encrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	__be32 counter = cpu_to_be32(1);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_tab[16+AESNI_ALIGN];
+	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	/* Assuming we are supporting rfc4106 64-bit extended
+	 * sequence numbers, we need to have the AAD length equal
+	 * to 8 or 12 bytes. */
+	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+		return -EINVAL;
+	/* Build the IV: salt || explicit IV || counter = 1 */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
+			GFP_ATOMIC);
+		if (unlikely(!src))
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+					 req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+		+ ((unsigned long)req->cryptlen), auth_tag_len);
+
+	/* The authTag (aka the Integrity Check Value) needs to be written
+	 * back to the packet. */
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0,
+					 req->cryptlen + auth_tag_len, 1);
+		kfree(src);
+	}
+	return 0;
+}
+
+static int __driver_rfc4106_decrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	unsigned long tempCipherLen = 0;
+	__be32 counter = cpu_to_be32(1);
+	int retval = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_and_authTag[32+AESNI_ALIGN];
+	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
+	u8 *authTag = iv + 16;
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	if (unlikely((req->cryptlen < auth_tag_len) ||
+		(req->assoclen != 8 && req->assoclen != 12)))
+		return -EINVAL;
+	/* Assuming we are supporting rfc4106 64-bit extended
+	 * sequence numbers, we need to have the AAD length
+	 * equal to 8 or 12 bytes. */
+
+	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
+	/* Build the IV: salt || explicit IV || counter = 1 */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+		if (!src)
+			return -ENOMEM;
+		assoc = (src + req->cryptlen);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+					 req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
+		authTag, auth_tag_len);
+
+	/* Compare the generated tag with the passed-in tag. */
+	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
+		-EBADMSG : 0;
+
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+		kfree(src);
+	}
+	return retval;
+}
+
+static struct crypto_alg __rfc4106_alg = {
+	.cra_name = "__gcm-aes-aesni",
+	.cra_driver_name = "__driver-gcm-aes-aesni",
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_aead_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
+	.cra_u = {
+		.aead = {
+			.encrypt = __driver_rfc4106_encrypt,
+			.decrypt = __driver_rfc4106_decrypt,
+		},
+	},
+};
+#endif
+
 static int __init aesni_init(void)
 {
 	int err;
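
For orientation, here is a hedged sketch of how a caller would reach the new transform through the kernel's AEAD API of this era; per rfc4106_set_key() above, the 4-byte salt is appended to the 16-byte AES key, so setkey receives 20 bytes, and the ICV size must be 8, 12 or 16. Error handling is abbreviated and the helper name is hypothetical:

	#include <crypto/aead.h>
	#include <linux/err.h>
	#include <linux/random.h>

	/* Allocate and key an rfc4106(gcm(aes)) transform (sketch only). */
	static struct crypto_aead *example_rfc4106_alloc(void)
	{
		struct crypto_aead *tfm;
		u8 key[16 + 4];	/* AES-128 key || 4-byte salt */
		int err;

		tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
		if (IS_ERR(tfm))
			return tfm;

		get_random_bytes(key, sizeof(key));
		err = crypto_aead_setkey(tfm, key, sizeof(key));
		if (!err)
			err = crypto_aead_setauthsize(tfm, 16);
		if (err) {
			crypto_free_aead(tfm);
			return ERR_PTR(err);
		}
		/* Use with aead_request_alloc() and an assoclen of 8 or
		 * 12 bytes, as __driver_rfc4106_encrypt() requires. */
		return tfm;
	}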
@@ -738,6 +1260,9 @@ static int __init aesni_init(void)
 		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
+
+	if ((err = crypto_fpu_init()))
+		goto fpu_err;
 	if ((err = crypto_register_alg(&aesni_alg)))
 		goto aes_err;
 	if ((err = crypto_register_alg(&__aesni_alg)))
@@ -746,18 +1271,24 @@ static int __init aesni_init(void)
 		goto blk_ecb_err;
 	if ((err = crypto_register_alg(&blk_cbc_alg)))
 		goto blk_cbc_err;
-	if ((err = crypto_register_alg(&blk_ctr_alg)))
-		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ecb_alg)))
 		goto ablk_ecb_err;
 	if ((err = crypto_register_alg(&ablk_cbc_alg)))
 		goto ablk_cbc_err;
+#ifdef CONFIG_X86_64
+	if ((err = crypto_register_alg(&blk_ctr_alg)))
+		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ctr_alg)))
 		goto ablk_ctr_err;
+	if ((err = crypto_register_alg(&__rfc4106_alg)))
+		goto __aead_gcm_err;
+	if ((err = crypto_register_alg(&rfc4106_alg)))
+		goto aead_gcm_err;
 #ifdef HAS_CTR
 	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
 		goto ablk_rfc3686_ctr_err;
 #endif
+#endif
 #ifdef HAS_LRW
 	if ((err = crypto_register_alg(&ablk_lrw_alg)))
 		goto ablk_lrw_err;
@@ -770,7 +1301,6 @@ static int __init aesni_init(void)
 	if ((err = crypto_register_alg(&ablk_xts_alg)))
 		goto ablk_xts_err;
 #endif
-
 	return err;
 
 #ifdef HAS_XTS
@@ -784,18 +1314,24 @@ ablk_pcbc_err:
 	crypto_unregister_alg(&ablk_lrw_alg);
 ablk_lrw_err:
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 ablk_rfc3686_ctr_err:
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+aead_gcm_err:
+	crypto_unregister_alg(&__rfc4106_alg);
+__aead_gcm_err:
 	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
 	crypto_unregister_alg(&ablk_ecb_alg);
 ablk_ecb_err:
-	crypto_unregister_alg(&blk_ctr_alg);
-blk_ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 blk_cbc_err:
 	crypto_unregister_alg(&blk_ecb_alg);
@@ -804,6 +1340,7 @@ blk_ecb_err:
 __aes_err:
 	crypto_unregister_alg(&aesni_alg);
 aes_err:
+fpu_err:
 	return err;
 }
 
@@ -818,17 +1355,23 @@ static void __exit aesni_exit(void)
 #ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+	crypto_unregister_alg(&__rfc4106_alg);
 	crypto_unregister_alg(&ablk_ctr_alg);
+	crypto_unregister_alg(&blk_ctr_alg);
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);
-	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);
 	crypto_unregister_alg(&__aesni_alg);
 	crypto_unregister_alg(&aesni_alg);
+
+	crypto_fpu_exit();
 }
 
 module_init(aesni_init);