author    Linus Torvalds <torvalds@g5.osdl.org>  2006-09-22 15:51:33 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-22 15:51:33 -0400
commit    6bbd9b6d694ff7242d63cda2faac4bd59ee4328e (patch)
tree      0641aa896e2ea01f4692973e5fbea429408854f4
parent    a489d159229fcc07bbb7566ac4fac745b79197ad (diff)
parent    3c164bd8153c4644a22dc2101b003c67cd2a0d0a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (64 commits)
  [BLOCK] dm-crypt: trivial comment improvements
  [CRYPTO] api: Deprecate crypto_digest_* and crypto_alg_available
  [CRYPTO] padlock: Convert padlock-sha to use crypto_hash
  [CRYPTO] users: Use crypto_comp and crypto_has_*
  [CRYPTO] api: Add crypto_comp and crypto_has_*
  [CRYPTO] users: Use crypto_hash interface instead of crypto_digest
  [SCSI] iscsi: Use crypto_hash interface instead of crypto_digest
  [CRYPTO] digest: Remove old HMAC implementation
  [CRYPTO] doc: Update documentation for hash and me
  [SCTP]: Use HMAC template and hash interface
  [IPSEC]: Use HMAC template and hash interface
  [CRYPTO] tcrypt: Use HMAC template and hash interface
  [CRYPTO] hmac: Add crypto template implementation
  [CRYPTO] digest: Added user API for new hash type
  [CRYPTO] api: Mark parts of cipher interface as deprecated
  [PATCH] scatterlist: Add const to sg_set_buf/sg_init_one pointer argument
  [CRYPTO] drivers: Remove obsolete block cipher operations
  [CRYPTO] users: Use block ciphers where applicable
  [SUNRPC] GSS: Use block ciphers where applicable
  [IPSEC] ESP: Use block ciphers where applicable
  ...
-rw-r--r--  Documentation/crypto/api-intro.txt | 36
-rw-r--r--  arch/i386/crypto/Makefile | 3
-rw-r--r--  arch/i386/crypto/aes.c | 3
-rw-r--r--  arch/i386/crypto/twofish-i586-asm.S | 335
-rw-r--r--  arch/i386/crypto/twofish.c | 97
-rw-r--r--  arch/s390/crypto/aes_s390.c | 285
-rw-r--r--  arch/s390/crypto/crypt_s390.h | 3
-rw-r--r--  arch/s390/crypto/des_s390.c | 559
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 2
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 2
-rw-r--r--  arch/x86_64/crypto/Makefile | 3
-rw-r--r--  arch/x86_64/crypto/aes.c | 5
-rw-r--r--  arch/x86_64/crypto/twofish-x86_64-asm.S | 324
-rw-r--r--  arch/x86_64/crypto/twofish.c | 97
-rw-r--r--  crypto/Kconfig | 154
-rw-r--r--  crypto/Makefile | 16
-rw-r--r--  crypto/aes.c | 5
-rw-r--r--  crypto/algapi.c | 486
-rw-r--r--  crypto/anubis.c | 3
-rw-r--r--  crypto/api.c | 428
-rw-r--r--  crypto/arc4.c | 2
-rw-r--r--  crypto/blkcipher.c | 405
-rw-r--r--  crypto/blowfish.c | 3
-rw-r--r--  crypto/cast5.c | 8
-rw-r--r--  crypto/cast6.c | 5
-rw-r--r--  crypto/cbc.c | 344
-rw-r--r--  crypto/cipher.c | 117
-rw-r--r--  crypto/crc32c.c | 30
-rw-r--r--  crypto/crypto_null.c | 2
-rw-r--r--  crypto/cryptomgr.c | 156
-rw-r--r--  crypto/des.c | 6
-rw-r--r--  crypto/digest.c | 155
-rw-r--r--  crypto/ecb.c | 181
-rw-r--r--  crypto/hash.c | 61
-rw-r--r--  crypto/hmac.c | 278
-rw-r--r--  crypto/internal.h | 106
-rw-r--r--  crypto/khazad.c | 8
-rw-r--r--  crypto/michael_mic.c | 5
-rw-r--r--  crypto/proc.c | 13
-rw-r--r--  crypto/scatterwalk.c | 89
-rw-r--r--  crypto/scatterwalk.h | 52
-rw-r--r--  crypto/serpent.c | 19
-rw-r--r--  crypto/sha1.c | 3
-rw-r--r--  crypto/sha256.c | 3
-rw-r--r--  crypto/tcrypt.c | 901
-rw-r--r--  crypto/tcrypt.h | 202
-rw-r--r--  crypto/tea.c | 16
-rw-r--r--  crypto/twofish.c | 700
-rw-r--r--  crypto/twofish_common.c | 744
-rw-r--r--  drivers/block/cryptoloop.c | 160
-rw-r--r--  drivers/crypto/Kconfig | 45
-rw-r--r--  drivers/crypto/Makefile | 8
-rw-r--r--  drivers/crypto/padlock-aes.c | 258
-rw-r--r--  drivers/crypto/padlock-generic.c | 63
-rw-r--r--  drivers/crypto/padlock-sha.c | 318
-rw-r--r--  drivers/crypto/padlock.c | 58
-rw-r--r--  drivers/crypto/padlock.h | 17
-rw-r--r--  drivers/md/dm-crypt.c | 146
-rw-r--r--  drivers/net/ppp_mppe.c | 68
-rw-r--r--  drivers/net/wireless/airo.c | 22
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 134
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 9
-rw-r--r--  fs/nfsd/nfs4recover.c | 21
-rw-r--r--  include/crypto/algapi.h | 156
-rw-r--r--  include/crypto/twofish.h | 22
-rw-r--r--  include/linux/crypto.h | 689
-rw-r--r--  include/linux/scatterlist.h | 4
-rw-r--r--  include/linux/sunrpc/gss_krb5.h | 19
-rw-r--r--  include/linux/sunrpc/gss_spkm3.h | 4
-rw-r--r--  include/net/ah.h | 30
-rw-r--r--  include/net/esp.h | 31
-rw-r--r--  include/net/ipcomp.h | 5
-rw-r--r--  include/net/sctp/constants.h | 4
-rw-r--r--  include/net/sctp/sctp.h | 11
-rw-r--r--  include/net/sctp/structs.h | 3
-rw-r--r--  include/net/xfrm.h | 12
-rw-r--r--  net/ieee80211/ieee80211_crypt_ccmp.c | 32
-rw-r--r--  net/ieee80211/ieee80211_crypt_tkip.c | 59
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c | 25
-rw-r--r--  net/ipv4/Kconfig | 1
-rw-r--r--  net/ipv4/ah4.c | 36
-rw-r--r--  net/ipv4/esp4.c | 85
-rw-r--r--  net/ipv4/ipcomp.c | 25
-rw-r--r--  net/ipv6/Kconfig | 1
-rw-r--r--  net/ipv6/ah6.c | 35
-rw-r--r--  net/ipv6/esp6.c | 90
-rw-r--r--  net/ipv6/ipcomp6.c | 25
-rw-r--r--  net/sctp/endpointola.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 37
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 95
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 24
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c | 29
-rw-r--r--  net/xfrm/xfrm_algo.c | 94
-rw-r--r--  net/xfrm/xfrm_user.c | 2
-rw-r--r--  security/seclvl.c | 18
98 files changed, 7726 insertions(+), 2780 deletions(-)
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index 74dffc68ff9f..5a03a2801d67 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -19,15 +19,14 @@ At the lowest level are algorithms, which register dynamically with the
 API.
 
 'Transforms' are user-instantiated objects, which maintain state, handle all
-of the implementation logic (e.g. manipulating page vectors), provide an
-abstraction to the underlying algorithms, and handle common logical
-operations (e.g. cipher modes, HMAC for digests). However, at the user
+of the implementation logic (e.g. manipulating page vectors) and provide an
+abstraction to the underlying algorithms. However, at the user
 level they are very simple.
 
 Conceptually, the API layering looks like this:
 
     [transform api]               (user interface)
-    [transform ops]               (per-type logic glue e.g. cipher.c, digest.c)
+    [transform ops]               (per-type logic glue e.g. cipher.c, compress.c)
     [algorithm api]               (for registering algorithms)
 
 The idea is to make the user interface and algorithm registration API
@@ -44,22 +43,27 @@ under development.
 Here's an example of how to use the API:
 
 	#include <linux/crypto.h>
+	#include <linux/err.h>
+	#include <linux/scatterlist.h>
 
 	struct scatterlist sg[2];
 	char result[128];
-	struct crypto_tfm *tfm;
+	struct crypto_hash *tfm;
+	struct hash_desc desc;
 
-	tfm = crypto_alloc_tfm("md5", 0);
-	if (tfm == NULL)
+	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
 		fail();
 
 	/* ... set up the scatterlists ... */
+
+	desc.tfm = tfm;
+	desc.flags = 0;
 
-	crypto_digest_init(tfm);
-	crypto_digest_update(tfm, &sg, 2);
-	crypto_digest_final(tfm, result);
+	if (crypto_hash_digest(&desc, &sg, 2, result))
+		fail();
 
-	crypto_free_tfm(tfm);
+	crypto_free_hash(tfm);
 
 
 Many real examples are available in the regression test module (tcrypt.c).
@@ -126,7 +130,7 @@ might already be working on.
 BUGS
 
 Send bug reports to:
-James Morris <jmorris@redhat.com>
+Herbert Xu <herbert@gondor.apana.org.au>
 Cc: David S. Miller <davem@redhat.com>
 
 
@@ -134,13 +138,14 @@ FURTHER INFORMATION
 
 For further patches and various updates, including the current TODO
 list, see:
-http://samba.org/~jamesm/crypto/
+http://gondor.apana.org.au/~herbert/crypto/
 
 
 AUTHORS
 
 James Morris
 David S. Miller
+Herbert Xu
 
 
 CREDITS
@@ -238,8 +243,11 @@ Anubis algorithm contributors:
 Tiger algorithm contributors:
 	Aaron Grothe
 
+VIA PadLock contributors:
+	Michal Ludvig
+
 Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
 
 Please send any credits updates or corrections to:
-James Morris <jmorris@redhat.com>
+Herbert Xu <herbert@gondor.apana.org.au>
 
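For context, the converted example above maps onto a complete helper roughly
as follows. This sketch is illustrative rather than part of the patch: the
function name, buffer and length are hypothetical, and it hashes a single
linear buffer via sg_init_one() (whose const-qualified signature is also
part of this merge). Note that crypto_hash_digest() takes a byte count, not
a scatterlist entry count:

	static int example_md5(const void *buf, unsigned int len, u8 *out)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int err;

		/* ask for a synchronous md5 implementation */
		tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		sg_init_one(&sg, buf, len);
		desc.tfm = tfm;
		desc.flags = 0;

		/* init/update/final collapsed into one call */
		err = crypto_hash_digest(&desc, &sg, len, out);

		crypto_free_hash(tfm);
		return err;
	}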
diff --git a/arch/i386/crypto/Makefile b/arch/i386/crypto/Makefile
index 103c353d0a63..3fd19af18e34 100644
--- a/arch/i386/crypto/Makefile
+++ b/arch/i386/crypto/Makefile
@@ -5,5 +5,8 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
+obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
 
 aes-i586-y := aes-i586-asm.o aes.o
+twofish-i586-y := twofish-i586-asm.o twofish.o
+
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index d3806daa3de3..49aad9397f10 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -379,12 +379,13 @@ static void gen_tabs(void)
 }
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	int i;
 	u32 ss[8];
 	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 
 	/* encryption schedule */
 
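This signature change recurs throughout the merge: setkey implementations no
longer receive a flags out-parameter but fetch it from the transform itself.
Distilled to a sketch (foo_setkey and the 16-byte check are made-up
placeholders, not code from the patch):

	static int foo_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int key_len)
	{
		u32 *flags = &tfm->crt_flags;

		if (key_len != 16) {
			/* report the reason through the tfm flags */
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		/* ... expand the key schedule ... */
		return 0;
	}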
diff --git a/arch/i386/crypto/twofish-i586-asm.S b/arch/i386/crypto/twofish-i586-asm.S
new file mode 100644
index 000000000000..39b98ed2c1b9
--- /dev/null
+++ b/arch/i386/crypto/twofish-i586-asm.S
@@ -0,0 +1,335 @@
+/***************************************************************************
+* Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> *
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+* This program is distributed in the hope that it will be useful, *
+* but WITHOUT ANY WARRANTY; without even the implied warranty of *
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+* GNU General Public License for more details. *
+* *
+* You should have received a copy of the GNU General Public License *
+* along with this program; if not, write to the *
+* Free Software Foundation, Inc., *
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+***************************************************************************/
+
+.file "twofish-i586-asm.S"
+.text
+
+#include <asm/asm-offsets.h>
+
+/* return adress at 0 */
+
+#define in_blk 12	/* input byte array address parameter*/
+#define out_blk 8	/* output byte array address parameter*/
+#define tfm 4		/* Twofish context structure */
+
+#define a_offset 0
+#define b_offset 4
+#define c_offset 8
+#define d_offset 12
+
+/* Structure of the crypto context struct*/
+
+#define s0 0		/* S0 Array 256 Words each */
+#define s1 1024	/* S1 Array */
+#define s2 2048	/* S2 Array */
+#define s3 3072	/* S3 Array */
+#define w 4096		/* 8 whitening keys (word) */
+#define k 4128		/* key 1-32 ( word ) */
+
+/* define a few register aliases to allow macro substitution */
+
+#define R0D %eax
+#define R0B %al
+#define R0H %ah
+
+#define R1D %ebx
+#define R1B %bl
+#define R1H %bh
+
+#define R2D %ecx
+#define R2B %cl
+#define R2H %ch
+
+#define R3D %edx
+#define R3B %dl
+#define R3H %dh
+
+
+/* performs input whitening */
+#define input_whitening(src,context,offset)\
+	xor w+offset(context), src;
+
+/* performs input whitening */
+#define output_whitening(src,context,offset)\
+	xor w+16+offset(context), src;
+
+/*
+ * a input register containing a (rotated 16)
+ * b input register containing b
+ * c input register containing c
+ * d input register containing d (already rol $1)
+ * operations on a and b are interleaved to increase performance
+ */
+#define encrypt_round(a,b,c,d,round)\
+	push d ## D;\
+	movzx b ## B, %edi;\
+	mov s1(%ebp,%edi,4),d ## D;\
+	movzx a ## B, %edi;\
+	mov s2(%ebp,%edi,4),%esi;\
+	movzx b ## H, %edi;\
+	ror $16, b ## D;\
+	xor s2(%ebp,%edi,4),d ## D;\
+	movzx a ## H, %edi;\
+	ror $16, a ## D;\
+	xor s3(%ebp,%edi,4),%esi;\
+	movzx b ## B, %edi;\
+	xor s3(%ebp,%edi,4),d ## D;\
+	movzx a ## B, %edi;\
+	xor (%ebp,%edi,4), %esi;\
+	movzx b ## H, %edi;\
+	ror $15, b ## D;\
+	xor (%ebp,%edi,4), d ## D;\
+	movzx a ## H, %edi;\
+	xor s1(%ebp,%edi,4),%esi;\
+	pop %edi;\
+	add d ## D, %esi;\
+	add %esi, d ## D;\
+	add k+round(%ebp), %esi;\
+	xor %esi, c ## D;\
+	rol $15, c ## D;\
+	add k+4+round(%ebp),d ## D;\
+	xor %edi, d ## D;
+
+/*
+ * a input register containing a (rotated 16)
+ * b input register containing b
+ * c input register containing c
+ * d input register containing d (already rol $1)
+ * operations on a and b are interleaved to increase performance
+ * last round has different rotations for the output preparation
+ */
+#define encrypt_last_round(a,b,c,d,round)\
+	push d ## D;\
+	movzx b ## B, %edi;\
+	mov s1(%ebp,%edi,4),d ## D;\
+	movzx a ## B, %edi;\
+	mov s2(%ebp,%edi,4),%esi;\
+	movzx b ## H, %edi;\
+	ror $16, b ## D;\
+	xor s2(%ebp,%edi,4),d ## D;\
+	movzx a ## H, %edi;\
+	ror $16, a ## D;\
+	xor s3(%ebp,%edi,4),%esi;\
+	movzx b ## B, %edi;\
+	xor s3(%ebp,%edi,4),d ## D;\
+	movzx a ## B, %edi;\
+	xor (%ebp,%edi,4), %esi;\
+	movzx b ## H, %edi;\
+	ror $16, b ## D;\
+	xor (%ebp,%edi,4), d ## D;\
+	movzx a ## H, %edi;\
+	xor s1(%ebp,%edi,4),%esi;\
+	pop %edi;\
+	add d ## D, %esi;\
+	add %esi, d ## D;\
+	add k+round(%ebp), %esi;\
+	xor %esi, c ## D;\
+	ror $1, c ## D;\
+	add k+4+round(%ebp),d ## D;\
+	xor %edi, d ## D;
+
+/*
+ * a input register containing a
+ * b input register containing b (rotated 16)
+ * c input register containing c
+ * d input register containing d (already rol $1)
+ * operations on a and b are interleaved to increase performance
+ */
+#define decrypt_round(a,b,c,d,round)\
+	push c ## D;\
+	movzx a ## B, %edi;\
+	mov (%ebp,%edi,4), c ## D;\
+	movzx b ## B, %edi;\
+	mov s3(%ebp,%edi,4),%esi;\
+	movzx a ## H, %edi;\
+	ror $16, a ## D;\
+	xor s1(%ebp,%edi,4),c ## D;\
+	movzx b ## H, %edi;\
+	ror $16, b ## D;\
+	xor (%ebp,%edi,4), %esi;\
+	movzx a ## B, %edi;\
+	xor s2(%ebp,%edi,4),c ## D;\
+	movzx b ## B, %edi;\
+	xor s1(%ebp,%edi,4),%esi;\
+	movzx a ## H, %edi;\
+	ror $15, a ## D;\
+	xor s3(%ebp,%edi,4),c ## D;\
+	movzx b ## H, %edi;\
+	xor s2(%ebp,%edi,4),%esi;\
+	pop %edi;\
+	add %esi, c ## D;\
+	add c ## D, %esi;\
+	add k+round(%ebp), c ## D;\
+	xor %edi, c ## D;\
+	add k+4+round(%ebp),%esi;\
+	xor %esi, d ## D;\
+	rol $15, d ## D;
+
+/*
+ * a input register containing a
+ * b input register containing b (rotated 16)
+ * c input register containing c
+ * d input register containing d (already rol $1)
+ * operations on a and b are interleaved to increase performance
+ * last round has different rotations for the output preparation
+ */
+#define decrypt_last_round(a,b,c,d,round)\
+	push c ## D;\
+	movzx a ## B, %edi;\
+	mov (%ebp,%edi,4), c ## D;\
+	movzx b ## B, %edi;\
+	mov s3(%ebp,%edi,4),%esi;\
+	movzx a ## H, %edi;\
+	ror $16, a ## D;\
+	xor s1(%ebp,%edi,4),c ## D;\
+	movzx b ## H, %edi;\
+	ror $16, b ## D;\
+	xor (%ebp,%edi,4), %esi;\
+	movzx a ## B, %edi;\
+	xor s2(%ebp,%edi,4),c ## D;\
+	movzx b ## B, %edi;\
+	xor s1(%ebp,%edi,4),%esi;\
+	movzx a ## H, %edi;\
+	ror $16, a ## D;\
+	xor s3(%ebp,%edi,4),c ## D;\
+	movzx b ## H, %edi;\
+	xor s2(%ebp,%edi,4),%esi;\
+	pop %edi;\
+	add %esi, c ## D;\
+	add c ## D, %esi;\
+	add k+round(%ebp), c ## D;\
+	xor %edi, c ## D;\
+	add k+4+round(%ebp),%esi;\
+	xor %esi, d ## D;\
+	ror $1, d ## D;
+
+.align 4
+.global twofish_enc_blk
+.global twofish_dec_blk
+
+twofish_enc_blk:
+	push %ebp			/* save registers according to calling convention*/
+	push %ebx
+	push %esi
+	push %edi
+
+	mov tfm + 16(%esp), %ebp	/* abuse the base pointer: set new base bointer to the crypto tfm */
+	add $crypto_tfm_ctx_offset, %ebp	/* ctx adress */
+	mov in_blk+16(%esp),%edi	/* input adress in edi */
+
+	mov (%edi), %eax
+	mov b_offset(%edi), %ebx
+	mov c_offset(%edi), %ecx
+	mov d_offset(%edi), %edx
+	input_whitening(%eax,%ebp,a_offset)
+	ror $16, %eax
+	input_whitening(%ebx,%ebp,b_offset)
+	input_whitening(%ecx,%ebp,c_offset)
+	input_whitening(%edx,%ebp,d_offset)
+	rol $1, %edx
+
+	encrypt_round(R0,R1,R2,R3,0);
+	encrypt_round(R2,R3,R0,R1,8);
+	encrypt_round(R0,R1,R2,R3,2*8);
+	encrypt_round(R2,R3,R0,R1,3*8);
+	encrypt_round(R0,R1,R2,R3,4*8);
+	encrypt_round(R2,R3,R0,R1,5*8);
+	encrypt_round(R0,R1,R2,R3,6*8);
+	encrypt_round(R2,R3,R0,R1,7*8);
+	encrypt_round(R0,R1,R2,R3,8*8);
+	encrypt_round(R2,R3,R0,R1,9*8);
+	encrypt_round(R0,R1,R2,R3,10*8);
+	encrypt_round(R2,R3,R0,R1,11*8);
+	encrypt_round(R0,R1,R2,R3,12*8);
+	encrypt_round(R2,R3,R0,R1,13*8);
+	encrypt_round(R0,R1,R2,R3,14*8);
+	encrypt_last_round(R2,R3,R0,R1,15*8);
+
+	output_whitening(%eax,%ebp,c_offset)
+	output_whitening(%ebx,%ebp,d_offset)
+	output_whitening(%ecx,%ebp,a_offset)
+	output_whitening(%edx,%ebp,b_offset)
+	mov out_blk+16(%esp),%edi;
+	mov %eax, c_offset(%edi)
+	mov %ebx, d_offset(%edi)
+	mov %ecx, (%edi)
+	mov %edx, b_offset(%edi)
+
+	pop %edi
+	pop %esi
+	pop %ebx
+	pop %ebp
+	mov $1, %eax
+	ret
+
+twofish_dec_blk:
+	push %ebp			/* save registers according to calling convention*/
+	push %ebx
+	push %esi
+	push %edi
+
+
+	mov tfm + 16(%esp), %ebp	/* abuse the base pointer: set new base bointer to the crypto tfm */
+	add $crypto_tfm_ctx_offset, %ebp	/* ctx adress */
+	mov in_blk+16(%esp),%edi	/* input adress in edi */
+
+	mov (%edi), %eax
+	mov b_offset(%edi), %ebx
+	mov c_offset(%edi), %ecx
+	mov d_offset(%edi), %edx
+	output_whitening(%eax,%ebp,a_offset)
+	output_whitening(%ebx,%ebp,b_offset)
+	ror $16, %ebx
+	output_whitening(%ecx,%ebp,c_offset)
+	output_whitening(%edx,%ebp,d_offset)
+	rol $1, %ecx
+
+	decrypt_round(R0,R1,R2,R3,15*8);
+	decrypt_round(R2,R3,R0,R1,14*8);
+	decrypt_round(R0,R1,R2,R3,13*8);
+	decrypt_round(R2,R3,R0,R1,12*8);
+	decrypt_round(R0,R1,R2,R3,11*8);
+	decrypt_round(R2,R3,R0,R1,10*8);
+	decrypt_round(R0,R1,R2,R3,9*8);
+	decrypt_round(R2,R3,R0,R1,8*8);
+	decrypt_round(R0,R1,R2,R3,7*8);
+	decrypt_round(R2,R3,R0,R1,6*8);
+	decrypt_round(R0,R1,R2,R3,5*8);
+	decrypt_round(R2,R3,R0,R1,4*8);
+	decrypt_round(R0,R1,R2,R3,3*8);
+	decrypt_round(R2,R3,R0,R1,2*8);
+	decrypt_round(R0,R1,R2,R3,1*8);
+	decrypt_last_round(R2,R3,R0,R1,0);
+
+	input_whitening(%eax,%ebp,c_offset)
+	input_whitening(%ebx,%ebp,d_offset)
+	input_whitening(%ecx,%ebp,a_offset)
+	input_whitening(%edx,%ebp,b_offset)
+	mov out_blk+16(%esp),%edi;
+	mov %eax, c_offset(%edi)
+	mov %ebx, d_offset(%edi)
+	mov %ecx, (%edi)
+	mov %edx, b_offset(%edi)
+
+	pop %edi
+	pop %esi
+	pop %ebx
+	pop %ebp
+	mov $1, %eax
+	ret
diff --git a/arch/i386/crypto/twofish.c b/arch/i386/crypto/twofish.c
new file mode 100644
index 000000000000..e3004dfe9c7a
--- /dev/null
+++ b/arch/i386/crypto/twofish.c
@@ -0,0 +1,97 @@
+/*
+ * Glue Code for optimized 586 assembler version of TWOFISH
+ *
+ * Originally Twofish for GPG
+ * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
+ * 256-bit key length added March 20, 1999
+ * Some modifications to reduce the text size by Werner Koch, April, 1998
+ * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
+ * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
+ *
+ * The original author has disclaimed all copyright interest in this
+ * code and thus put it in the public domain. The subsequent authors
+ * have put this under the GNU General Public License.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ * This code is a "clean room" implementation, written from the paper
+ * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
+ * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
+ * through http://www.counterpane.com/twofish.html
+ *
+ * For background information on multiplication in finite fields, used for
+ * the matrix operations in the key schedule, see the book _Contemporary
+ * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
+ * Third Edition.
+ */
+
+#include <crypto/twofish.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+
+asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+
+static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	twofish_enc_blk(tfm, dst, src);
+}
+
+static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	twofish_dec_blk(tfm, dst, src);
+}
+
+static struct crypto_alg alg = {
+	.cra_name		=	"twofish",
+	.cra_driver_name	=	"twofish-i586",
+	.cra_priority		=	200,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	TF_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct twofish_ctx),
+	.cra_alignmask		=	3,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
+			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
+			.cia_setkey		=	twofish_setkey,
+			.cia_encrypt		=	twofish_encrypt,
+			.cia_decrypt		=	twofish_decrypt
+		}
+	}
+};
+
+static int __init init(void)
+{
+	return crypto_register_alg(&alg);
+}
+
+static void __exit fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized");
+MODULE_ALIAS("twofish");
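Because the glue code registers with .cra_priority = 200 while the generic C
twofish of this era leaves the default (an assumption here, not stated in
the patch), a lookup by the bare name "twofish" should resolve to this
version once the module is loaded, and chaining modes come from the new
templates rather than from the cipher itself. A hedged caller-side sketch
(key and key length are hypothetical):

	struct crypto_blkcipher *tfm;

	/* the ecb template (crypto/ecb.c, added in this merge) is wrapped
	 * around the highest-priority "twofish" implementation */
	tfm = crypto_alloc_blkcipher("ecb(twofish)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* crypto_blkcipher_setkey(tfm, key, 16); ... encrypt/decrypt ... */

	crypto_free_blkcipher(tfm);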
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 5713c7e5bd16..15c9eec02928 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -16,9 +16,9 @@
  *
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include "crypt_s390.h"
 
 #define AES_MIN_KEY_SIZE	16
@@ -34,13 +34,16 @@ int has_aes_256 = 0;
 struct s390_aes_ctx {
 	u8 iv[AES_BLOCK_SIZE];
 	u8 key[AES_MAX_KEY_SIZE];
+	long enc;
+	long dec;
 	int key_len;
 };
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
 
 	switch (key_len) {
 	case 16:
@@ -110,133 +113,206 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	}
 }
 
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
-{
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+static struct crypto_alg aes_alg = {
+	.cra_name		=	"aes",
+	.cra_driver_name	=	"aes-s390",
+	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey		=	aes_set_key,
+			.cia_encrypt		=	aes_encrypt,
+			.cia_decrypt		=	aes_decrypt,
+		}
+	}
+};
+
+static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	switch (sctx->key_len) {
+	switch (key_len) {
 	case 16:
-		ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KM_AES_128_ENCRYPT;
+		sctx->dec = KM_AES_128_DECRYPT;
 		break;
 	case 24:
-		ret = crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KM_AES_192_ENCRYPT;
+		sctx->dec = KM_AES_192_DECRYPT;
 		break;
 	case 32:
-		ret = crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KM_AES_256_ENCRYPT;
+		sctx->dec = KM_AES_256_DECRYPT;
 		break;
 	}
-	return nbytes;
+
+	return aes_set_key(tfm, in_key, key_len);
 }
 
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+			 struct blkcipher_walk *walk)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+	while ((nbytes = walk->nbytes)) {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
 
-	switch (sctx->key_len) {
-	case 16:
-		ret = crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 24:
-		ret = crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 32:
-		ret = crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
+		ret = crypt_s390_km(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
+
+		nbytes &= AES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
-	return nbytes;
+
+	return ret;
 }
 
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+}
 
-	memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
-	switch (sctx->key_len) {
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+}
+
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name		=	"ecb(aes)",
+	.cra_driver_name	=	"ecb-aes-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	ecb_aes_set_key,
+			.encrypt	=	ecb_aes_encrypt,
+			.decrypt	=	ecb_aes_decrypt,
+		}
+	}
+};
+
+static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	switch (key_len) {
 	case 16:
-		ret = crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KMC_AES_128_ENCRYPT;
+		sctx->dec = KMC_AES_128_DECRYPT;
 		break;
 	case 24:
-		ret = crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KMC_AES_192_ENCRYPT;
+		sctx->dec = KMC_AES_192_DECRYPT;
 		break;
 	case 32:
-		ret = crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KMC_AES_256_ENCRYPT;
+		sctx->dec = KMC_AES_256_DECRYPT;
 		break;
 	}
-	memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE);
 
-	return nbytes;
+	return aes_set_key(tfm, in_key, key_len);
 }
 
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+			 struct blkcipher_walk *walk)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes = walk->nbytes;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+	if (!nbytes)
+		goto out;
 
-	memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
-	switch (sctx->key_len) {
-	case 16:
-		ret = crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 24:
-		ret = crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 32:
-		ret = crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	}
-	return nbytes;
+	memcpy(param, walk->iv, AES_BLOCK_SIZE);
+	do {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
+
+		ret = crypt_s390_kmc(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
+
+		nbytes &= AES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	} while ((nbytes = walk->nbytes));
+	memcpy(walk->iv, param, AES_BLOCK_SIZE);
+
+out:
+	return ret;
 }
 
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-static struct crypto_alg aes_alg = {
-	.cra_name		=	"aes",
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+}
+
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+}
+
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name		=	"cbc(aes)",
+	.cra_driver_name	=	"cbc-aes-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
 	.cra_u			=	{
-		.cipher = {
-			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
-			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
-			.cia_setkey		=	aes_set_key,
-			.cia_encrypt		=	aes_encrypt,
-			.cia_decrypt		=	aes_decrypt,
-			.cia_encrypt_ecb	=	aes_encrypt_ecb,
-			.cia_decrypt_ecb	=	aes_decrypt_ecb,
-			.cia_encrypt_cbc	=	aes_encrypt_cbc,
-			.cia_decrypt_cbc	=	aes_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.ivsize		=	AES_BLOCK_SIZE,
+			.setkey		=	cbc_aes_set_key,
+			.encrypt	=	cbc_aes_encrypt,
+			.decrypt	=	cbc_aes_decrypt,
 		}
 	}
 };
@@ -256,13 +332,40 @@ static int __init aes_init(void)
 		return -ENOSYS;
 
 	ret = crypto_register_alg(&aes_alg);
-	if (ret != 0)
-		printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n");
+	if (ret != 0) {
+		printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
+		goto aes_err;
+	}
+
+	ret = crypto_register_alg(&ecb_aes_alg);
+	if (ret != 0) {
+		printk(KERN_INFO
+		       "crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
+		goto ecb_aes_err;
+	}
+
+	ret = crypto_register_alg(&cbc_aes_alg);
+	if (ret != 0) {
+		printk(KERN_INFO
+		       "crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
+		goto cbc_aes_err;
+	}
+
+out:
 	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	goto out;
 }
 
 static void __exit aes_fini(void)
 {
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
 
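A hedged caller-side sketch (not from the patch) of how the new cbc-aes-s390
blkcipher above gets exercised; on s390 a "cbc(aes)" request should resolve
to it when the CPACF facility is present. The function name, key, iv and
buffer are hypothetical, and a 128-bit key is assumed:

	static int example_cbc_aes(const u8 *key, const u8 *iv, void *buf,
				   unsigned int len)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_blkcipher_setkey(tfm, key, 16);
		if (!err) {
			crypto_blkcipher_set_iv(tfm, iv, 16);
			desc.tfm = tfm;
			desc.flags = 0;
			sg_init_one(&sg, buf, len);
			/* encrypt in place: dst == src */
			err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
		}
		crypto_free_blkcipher(tfm);
		return err;
	}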
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index d1c259a7fe33..efd836c2e4a6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -20,6 +20,9 @@
 #define CRYPT_S390_OP_MASK 0xFF00
 #define CRYPT_S390_FUNC_MASK 0x00FF
 
+#define CRYPT_S390_PRIORITY 300
+#define CRYPT_S390_COMPOSITE_PRIORITY 400
+
 /* s930 cryptographic operations */
 enum crypt_s390_operations {
 	CRYPT_S390_KM	= 0x0100,
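These two constants feed the priority-based lookup used throughout this
merge: when several algorithms register the same cra_name, the API prefers
the highest cra_priority. A rough, illustrative ranking (the exact priority
of the generic C implementations of this era is an assumption here):

	/* illustrative only, not values from the patch:
	 *   aes (generic C)          low/default cra_priority
	 *   aes-s390                 300 (CRYPT_S390_PRIORITY)
	 *   ecb-aes-s390 and friends 400 (CRYPT_S390_COMPOSITE_PRIORITY),
	 *   so the hardware mode implementations also win over the generic
	 *   ecb()/cbc() templates wrapped around aes-s390.
	 */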
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index b3f7496a79b4..2aba04852fe3 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -13,9 +13,10 @@
  * (at your option) any later version.
  *
  */
+
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 
 #include "crypt_s390.h"
 #include "crypto_des.h"
@@ -45,9 +46,10 @@ struct crypt_s390_des3_192_ctx {
 };
 
 static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
-		      unsigned int keylen, u32 *flags)
+		      unsigned int keylen)
 {
 	struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
 	int ret;
 
 	/* test if key is valid (not a weak key) */
@@ -71,85 +73,159 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
 }
 
-static unsigned int des_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg des_alg = {
+	.cra_name		=	"des",
+	.cra_driver_name	=	"des-s390",
+	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	DES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_s390_des_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(des_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	DES_KEY_SIZE,
+			.cia_max_keysize	=	DES_KEY_SIZE,
+			.cia_setkey		=	des_setkey,
+			.cia_encrypt		=	des_encrypt,
+			.cia_decrypt		=	des_decrypt,
+		}
+	}
+};
+
+static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+			    void *param, struct blkcipher_walk *walk)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes;
+
+	while ((nbytes = walk->nbytes)) {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_DEA_ENCRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+		ret = crypt_s390_km(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
 
-	return nbytes;
+		nbytes &= DES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	}
+
+	return ret;
 }
 
-static unsigned int des_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+			    void *param, struct blkcipher_walk *walk)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes = walk->nbytes;
+
+	if (!nbytes)
+		goto out;
+
+	memcpy(param, walk->iv, DES_BLOCK_SIZE);
+	do {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_DEA_DECRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+		ret = crypt_s390_kmc(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
 
-	return nbytes;
+		nbytes &= DES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	} while ((nbytes = walk->nbytes));
+	memcpy(walk->iv, param, DES_BLOCK_SIZE);
+
+out:
+	return ret;
 }
 
-static unsigned int des_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_des_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
+}
 
-	memcpy(sctx->iv, desc->info, DES_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_DEA_ENCRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int ecb_des_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	memcpy(desc->info, sctx->iv, DES_BLOCK_SIZE);
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
 }
 
-static unsigned int des_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg ecb_des_alg = {
+	.cra_name		=	"ecb(des)",
+	.cra_driver_name	=	"ecb-des-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	DES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_s390_des_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(ecb_des_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize	=	DES_KEY_SIZE,
+			.max_keysize	=	DES_KEY_SIZE,
+			.setkey		=	des_setkey,
+			.encrypt	=	ecb_des_encrypt,
+			.decrypt	=	ecb_des_decrypt,
+		}
+	}
+};
+
+static int cbc_des_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
+}
 
-	memcpy(&sctx->iv, desc->info, DES_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_DEA_DECRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int cbc_des_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
 }
 
-static struct crypto_alg des_alg = {
-	.cra_name		=	"des",
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_des_alg = {
+	.cra_name		=	"cbc(des)",
+	.cra_driver_name	=	"cbc-des-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct crypt_s390_des_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(des_alg.cra_list),
+	.cra_list		=	LIST_HEAD_INIT(cbc_des_alg.cra_list),
 	.cra_u			=	{
-		.cipher = {
-			.cia_min_keysize	=	DES_KEY_SIZE,
-			.cia_max_keysize	=	DES_KEY_SIZE,
-			.cia_setkey		=	des_setkey,
-			.cia_encrypt		=	des_encrypt,
-			.cia_decrypt		=	des_decrypt,
-			.cia_encrypt_ecb	=	des_encrypt_ecb,
-			.cia_decrypt_ecb	=	des_decrypt_ecb,
-			.cia_encrypt_cbc	=	des_encrypt_cbc,
-			.cia_decrypt_cbc	=	des_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize	=	DES_KEY_SIZE,
+			.max_keysize	=	DES_KEY_SIZE,
+			.ivsize		=	DES_BLOCK_SIZE,
+			.setkey		=	des_setkey,
+			.encrypt	=	cbc_des_encrypt,
+			.decrypt	=	cbc_des_decrypt,
 		}
 	}
 };
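The ecb_desall_crypt()/cbc_desall_crypt() helpers above follow the
provider-side contract of the new blkcipher walk API. Stripped to a
skeleton (BLOCK_SIZE is a generic placeholder here, not a real constant):

	int ret = blkcipher_walk_virt(desc, walk);	/* map first src/dst span */
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* process the largest whole-block prefix of this span */
		unsigned int n = nbytes & ~(BLOCK_SIZE - 1);

		/* ... transform n bytes from walk->src.virt.addr
		 *     to walk->dst.virt.addr ... */

		/* hand back the unprocessed tail; the walk advances
		 * to the next span or finishes */
		ret = blkcipher_walk_done(desc, walk, nbytes & (BLOCK_SIZE - 1));
	}
	return ret;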
@@ -167,11 +243,12 @@ static struct crypto_alg des_alg = {
  *
  */
 static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen, u32 *flags)
+			   unsigned int keylen)
 {
 	int i, ret;
 	struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u8* temp_key = key;
+	const u8 *temp_key = key;
+	u32 *flags = &tfm->crt_flags;
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
@@ -202,89 +279,111 @@ static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 			DES3_128_BLOCK_SIZE);
 }
 
-static unsigned int des3_128_encrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+static struct crypto_alg des3_128_alg = {
+	.cra_name		=	"des3_ede128",
+	.cra_driver_name	=	"des3_ede128-s390",
+	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	DES3_128_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_s390_des3_128_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(des3_128_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	DES3_128_KEY_SIZE,
+			.cia_max_keysize	=	DES3_128_KEY_SIZE,
+			.cia_setkey		=	des3_128_setkey,
+			.cia_encrypt		=	des3_128_encrypt,
+			.cia_decrypt		=	des3_128_decrypt,
+		}
+	}
+};
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_128_ENCRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int ecb_des3_128_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_128_decrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int ecb_des3_128_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_128_DECRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
-
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_128_encrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
+static struct crypto_alg ecb_des3_128_alg = {
+	.cra_name		=	"ecb(des3_ede128)",
+	.cra_driver_name	=	"ecb-des3_ede128-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	DES3_128_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_s390_des3_128_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(
+						ecb_des3_128_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize	=	DES3_128_KEY_SIZE,
+			.max_keysize	=	DES3_128_KEY_SIZE,
+			.setkey		=	des3_128_setkey,
+			.encrypt	=	ecb_des3_128_encrypt,
+			.decrypt	=	ecb_des3_128_decrypt,
+		}
+	}
+};
 
-	memcpy(sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_128_ENCRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int cbc_des3_128_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	memcpy(desc->info, sctx->iv, DES3_128_BLOCK_SIZE);
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk);
 }
 
-static unsigned int des3_128_decrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int cbc_des3_128_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
-
-	memcpy(&sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_128_DECRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk);
 }
 
-static struct crypto_alg des3_128_alg = {
-	.cra_name		=	"des3_ede128",
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_des3_128_alg = {
+	.cra_name		=	"cbc(des3_ede128)",
+	.cra_driver_name	=	"cbc-des3_ede128-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES3_128_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct crypt_s390_des3_128_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(des3_128_alg.cra_list),
+	.cra_list		=	LIST_HEAD_INIT(
+						cbc_des3_128_alg.cra_list),
 	.cra_u			=	{
-		.cipher = {
-			.cia_min_keysize	=	DES3_128_KEY_SIZE,
-			.cia_max_keysize	=	DES3_128_KEY_SIZE,
-			.cia_setkey		=	des3_128_setkey,
-			.cia_encrypt		=	des3_128_encrypt,
-			.cia_decrypt		=	des3_128_decrypt,
-			.cia_encrypt_ecb	=	des3_128_encrypt_ecb,
-			.cia_decrypt_ecb	=	des3_128_decrypt_ecb,
-			.cia_encrypt_cbc	=	des3_128_encrypt_cbc,
-			.cia_decrypt_cbc	=	des3_128_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize	=	DES3_128_KEY_SIZE,
+			.max_keysize	=	DES3_128_KEY_SIZE,
+			.ivsize		=	DES3_128_BLOCK_SIZE,
+			.setkey		=	des3_128_setkey,
+			.encrypt	=	cbc_des3_128_encrypt,
+			.decrypt	=	cbc_des3_128_decrypt,
 		}
 	}
 };
@@ -303,11 +402,12 @@ static struct crypto_alg des3_128_alg = {
  *
  */
 static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen, u32 *flags)
+			   unsigned int keylen)
 {
 	int i, ret;
 	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u8* temp_key = key;
+	const u8 *temp_key = key;
+	u32 *flags = &tfm->crt_flags;
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
 	    memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
@@ -341,89 +441,111 @@ static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 			DES3_192_BLOCK_SIZE);
 }
 
-static unsigned int des3_192_encrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+static struct crypto_alg des3_192_alg = {
+	.cra_name		=	"des3_ede",
+	.cra_driver_name	=	"des3_ede-s390",
+	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	DES3_192_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(des3_192_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	DES3_192_KEY_SIZE,
+			.cia_max_keysize	=	DES3_192_KEY_SIZE,
+			.cia_setkey		=	des3_192_setkey,
+			.cia_encrypt		=	des3_192_encrypt,
+			.cia_decrypt		=	des3_192_decrypt,
+		}
+	}
+};
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_192_ENCRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_192_decrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_192_DECRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_192_encrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
+static struct crypto_alg ecb_des3_192_alg = {
+	.cra_name		=	"ecb(des3_ede)",
+	.cra_driver_name	=	"ecb-des3_ede-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	DES3_192_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(
+						ecb_des3_192_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize	=	DES3_192_KEY_SIZE,
+			.max_keysize	=	DES3_192_KEY_SIZE,
+			.setkey		=	des3_192_setkey,
+			.encrypt	=	ecb_des3_192_encrypt,
+			.decrypt	=	ecb_des3_192_decrypt,
+		}
+	}
+};
 
-	memcpy(sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_192_ENCRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	memcpy(desc->info, sctx->iv, DES3_192_BLOCK_SIZE);
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
 }
 
-static unsigned int des3_192_decrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
-
-	memcpy(&sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_192_DECRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
-
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
 }
 
-static struct crypto_alg des3_192_alg = {
-	.cra_name		=	"des3_ede",
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_des3_192_alg = {
+	.cra_name		=	"cbc(des3_ede)",
+	.cra_driver_name	=	"cbc-des3_ede-s390",
+	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES3_192_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(des3_192_alg.cra_list),
+	.cra_list		=	LIST_HEAD_INIT(
+						cbc_des3_192_alg.cra_list),
 	.cra_u			=	{
-		.cipher = {
-			.cia_min_keysize	=	DES3_192_KEY_SIZE,
-			.cia_max_keysize	=	DES3_192_KEY_SIZE,
-			.cia_setkey		=	des3_192_setkey,
-			.cia_encrypt		=	des3_192_encrypt,
-			.cia_decrypt		=	des3_192_decrypt,
-			.cia_encrypt_ecb	=	des3_192_encrypt_ecb,
-			.cia_decrypt_ecb	=	des3_192_decrypt_ecb,
-			.cia_encrypt_cbc	=	des3_192_encrypt_cbc,
-			.cia_decrypt_cbc	=	des3_192_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize	=	DES3_192_KEY_SIZE,
+			.max_keysize	=	DES3_192_KEY_SIZE,
+			.ivsize		=	DES3_192_BLOCK_SIZE,
+			.setkey		=	des3_192_setkey,
+			.encrypt	=	cbc_des3_192_encrypt,
+			.decrypt	=	cbc_des3_192_decrypt,
 		}
 	}
 };
@@ -437,22 +559,69 @@ static int init(void)
437 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
438 return -ENOSYS; 560 return -ENOSYS;
439 561
440 ret |= (crypto_register_alg(&des_alg) == 0) ? 0:1; 562 ret = crypto_register_alg(&des_alg);
441 ret |= (crypto_register_alg(&des3_128_alg) == 0) ? 0:2; 563 if (ret)
442 ret |= (crypto_register_alg(&des3_192_alg) == 0) ? 0:4; 564 goto des_err;
443 if (ret) { 565 ret = crypto_register_alg(&ecb_des_alg);
444 crypto_unregister_alg(&des3_192_alg); 566 if (ret)
445 crypto_unregister_alg(&des3_128_alg); 567 goto ecb_des_err;
446 crypto_unregister_alg(&des_alg); 568 ret = crypto_register_alg(&cbc_des_alg);
447 return -EEXIST; 569 if (ret)
448 } 570 goto cbc_des_err;
449 return 0; 571
572 ret = crypto_register_alg(&des3_128_alg);
573 if (ret)
574 goto des3_128_err;
575 ret = crypto_register_alg(&ecb_des3_128_alg);
576 if (ret)
577 goto ecb_des3_128_err;
578 ret = crypto_register_alg(&cbc_des3_128_alg);
579 if (ret)
580 goto cbc_des3_128_err;
581
582 ret = crypto_register_alg(&des3_192_alg);
583 if (ret)
584 goto des3_192_err;
585 ret = crypto_register_alg(&ecb_des3_192_alg);
586 if (ret)
587 goto ecb_des3_192_err;
588 ret = crypto_register_alg(&cbc_des3_192_alg);
589 if (ret)
590 goto cbc_des3_192_err;
591
592out:
593 return ret;
594
595cbc_des3_192_err:
596 crypto_unregister_alg(&ecb_des3_192_alg);
597ecb_des3_192_err:
598 crypto_unregister_alg(&des3_192_alg);
599des3_192_err:
600 crypto_unregister_alg(&cbc_des3_128_alg);
601cbc_des3_128_err:
602 crypto_unregister_alg(&ecb_des3_128_alg);
603ecb_des3_128_err:
604 crypto_unregister_alg(&des3_128_alg);
605des3_128_err:
606 crypto_unregister_alg(&cbc_des_alg);
607cbc_des_err:
608 crypto_unregister_alg(&ecb_des_alg);
609ecb_des_err:
610 crypto_unregister_alg(&des_alg);
611des_err:
612 goto out;
450} 613}
451 614
452static void __exit fini(void) 615static void __exit fini(void)
453{ 616{
617 crypto_unregister_alg(&cbc_des3_192_alg);
618 crypto_unregister_alg(&ecb_des3_192_alg);
454 crypto_unregister_alg(&des3_192_alg); 619 crypto_unregister_alg(&des3_192_alg);
620 crypto_unregister_alg(&cbc_des3_128_alg);
621 crypto_unregister_alg(&ecb_des3_128_alg);
455 crypto_unregister_alg(&des3_128_alg); 622 crypto_unregister_alg(&des3_128_alg);
623 crypto_unregister_alg(&cbc_des_alg);
624 crypto_unregister_alg(&ecb_des_alg);
456 crypto_unregister_alg(&des_alg); 625 crypto_unregister_alg(&des_alg);
457} 626}
458 627
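With the cia_*_ecb/cia_*_cbc hooks gone, the chaining modes above are reached through the generic blkcipher interface rather than cipher-private entry points. A minimal consumer-side sketch, assuming the crypto_blkcipher helpers this series introduces; the function name, the in-place single-segment scatterlist, and the literal 24-byte key size are illustrative only:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: CBC-encrypt nbytes (a multiple of the DES block
 * size) in place.  The "cbc(des3_ede)" spelling resolves to the
 * cbc-des3_ede-s390 driver above when the CPU has the KMC instruction,
 * and to the cbc template around the generic cipher otherwise. */
static int cbc_3des_encrypt(const u8 *key, const u8 *iv,
			    struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, 24);	/* 3 x 8-byte DES keys */
	if (!err) {
		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
		desc.tfm = tfm;
		desc.flags = 0;
		err = crypto_blkcipher_encrypt(&desc, sg, sg, nbytes);
	}

	crypto_free_blkcipher(tfm);
	return err;
}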
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 9d34a35b1aa5..49ca8690ee39 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -126,6 +126,8 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
 
 static struct crypto_alg alg = {
 	.cra_name	=	"sha1",
+	.cra_driver_name =	"sha1-s390",
+	.cra_priority	=	CRYPT_S390_PRIORITY,
 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize	=	SHA1_BLOCK_SIZE,
 	.cra_ctxsize	=	sizeof(struct crypt_s390_sha1_ctx),
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index f573df30f31d..8e4e67503fe7 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -127,6 +127,8 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
 
 static struct crypto_alg alg = {
 	.cra_name	=	"sha256",
+	.cra_driver_name =	"sha256-s390",
+	.cra_priority	=	CRYPT_S390_PRIORITY,
 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize	=	SHA256_BLOCK_SIZE,
 	.cra_ctxsize	=	sizeof(struct s390_sha256_ctx),
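The cra_driver_name/cra_priority pairs added here are what let the hardware digests coexist with the generic C versions: a plain "sha256" request is arbitrated by priority (so the s390 driver wins where available), while "sha256-s390" pins it explicitly. A sketch of the crypto_hash interface from this series; the helper name and buffer handling are illustrative:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Digest one linear buffer with whichever "sha256" wins arbitration. */
static int sha256_digest_buf(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&sg, buf, len);
	desc.tfm = tfm;
	desc.flags = 0;
	err = crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(tfm);
	return err;
}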
diff --git a/arch/x86_64/crypto/Makefile b/arch/x86_64/crypto/Makefile
index 426d20f4b72e..15b538a8b7f7 100644
--- a/arch/x86_64/crypto/Makefile
+++ b/arch/x86_64/crypto/Makefile
@@ -5,5 +5,8 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 
 aes-x86_64-y := aes-x86_64-asm.o aes.o
+twofish-x86_64-y := twofish-x86_64-asm.o twofish.o
+
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index 68866fab37aa..5cdb13ea5cc2 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -228,13 +228,14 @@ static void __init gen_tabs(void)
 }
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	u32 i, j, t, u, v, w;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
+	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
diff --git a/arch/x86_64/crypto/twofish-x86_64-asm.S b/arch/x86_64/crypto/twofish-x86_64-asm.S
new file mode 100644
index 000000000000..35974a586615
--- /dev/null
+++ b/arch/x86_64/crypto/twofish-x86_64-asm.S
@@ -0,0 +1,324 @@
+/***************************************************************************
+*   Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de>         *
+*                                                                          *
+*   This program is free software; you can redistribute it and/or modify   *
+*   it under the terms of the GNU General Public License as published by   *
+*   the Free Software Foundation; either version 2 of the License, or      *
+*   (at your option) any later version.                                    *
+*                                                                          *
+*   This program is distributed in the hope that it will be useful,        *
+*   but WITHOUT ANY WARRANTY; without even the implied warranty of         *
+*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          *
+*   GNU General Public License for more details.                           *
+*                                                                          *
+*   You should have received a copy of the GNU General Public License      *
+*   along with this program; if not, write to the                          *
+*   Free Software Foundation, Inc.,                                        *
+*   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.              *
+***************************************************************************/
+
+.file "twofish-x86_64-asm.S"
+.text
+
+#include <asm/asm-offsets.h>
+
+#define a_offset	0
+#define b_offset	4
+#define c_offset	8
+#define d_offset	12
+
+/* Structure of the crypto context struct */
+
+#define s0	0	/* S0 Array 256 Words each */
+#define s1	1024	/* S1 Array */
+#define s2	2048	/* S2 Array */
+#define s3	3072	/* S3 Array */
+#define w	4096	/* 8 whitening keys (word) */
+#define k	4128	/* key 1-32 (word) */
+
+/* define a few register aliases to allow macro substitution */
+
+#define R0	%rax
+#define R0D	%eax
+#define R0B	%al
+#define R0H	%ah
+
+#define R1	%rbx
+#define R1D	%ebx
+#define R1B	%bl
+#define R1H	%bh
+
+#define R2	%rcx
+#define R2D	%ecx
+#define R2B	%cl
+#define R2H	%ch
+
+#define R3	%rdx
+#define R3D	%edx
+#define R3B	%dl
+#define R3H	%dh
+
+
+/* performs input whitening */
+#define input_whitening(src,context,offset)\
+	xor	w+offset(context),	src;
+
+/* performs output whitening */
+#define output_whitening(src,context,offset)\
+	xor	w+16+offset(context),	src;
+
+
+/*
+ * a input register containing a (rotated 16)
+ * b input register containing b
+ * c input register containing c
+ * d input register containing d (already rol $1)
+ * operations on a and b are interleaved to increase performance
+ */
+#define encrypt_round(a,b,c,d,round)\
+	movzx	b ## B,		%edi;\
+	mov	s1(%r11,%rdi,4),%r8d;\
+	movzx	a ## B,		%edi;\
+	mov	s2(%r11,%rdi,4),%r9d;\
+	movzx	b ## H,		%edi;\
+	ror	$16,		b ## D;\
+	xor	s2(%r11,%rdi,4),%r8d;\
+	movzx	a ## H,		%edi;\
+	ror	$16,		a ## D;\
+	xor	s3(%r11,%rdi,4),%r9d;\
+	movzx	b ## B,		%edi;\
+	xor	s3(%r11,%rdi,4),%r8d;\
+	movzx	a ## B,		%edi;\
+	xor	(%r11,%rdi,4),	%r9d;\
+	movzx	b ## H,		%edi;\
+	ror	$15,		b ## D;\
+	xor	(%r11,%rdi,4),	%r8d;\
+	movzx	a ## H,		%edi;\
+	xor	s1(%r11,%rdi,4),%r9d;\
+	add	%r8d,		%r9d;\
+	add	%r9d,		%r8d;\
+	add	k+round(%r11),	%r9d;\
+	xor	%r9d,		c ## D;\
+	rol	$15,		c ## D;\
+	add	k+4+round(%r11),%r8d;\
+	xor	%r8d,		d ## D;
+
+/*
+ * a input register containing a (rotated 16)
+ * b input register containing b
+ * c input register containing c
+ * d input register containing d (already rol $1)
+ * operations on a and b are interleaved to increase performance
+ * during the round a and b are prepared for the output whitening
+ */
+#define encrypt_last_round(a,b,c,d,round)\
+	mov	b ## D,		%r10d;\
+	shl	$32,		%r10;\
+	movzx	b ## B,		%edi;\
+	mov	s1(%r11,%rdi,4),%r8d;\
+	movzx	a ## B,		%edi;\
+	mov	s2(%r11,%rdi,4),%r9d;\
+	movzx	b ## H,		%edi;\
+	ror	$16,		b ## D;\
+	xor	s2(%r11,%rdi,4),%r8d;\
+	movzx	a ## H,		%edi;\
+	ror	$16,		a ## D;\
+	xor	s3(%r11,%rdi,4),%r9d;\
+	movzx	b ## B,		%edi;\
+	xor	s3(%r11,%rdi,4),%r8d;\
+	movzx	a ## B,		%edi;\
+	xor	(%r11,%rdi,4),	%r9d;\
+	xor	a,		%r10;\
+	movzx	b ## H,		%edi;\
+	xor	(%r11,%rdi,4),	%r8d;\
+	movzx	a ## H,		%edi;\
+	xor	s1(%r11,%rdi,4),%r9d;\
+	add	%r8d,		%r9d;\
+	add	%r9d,		%r8d;\
+	add	k+round(%r11),	%r9d;\
+	xor	%r9d,		c ## D;\
+	ror	$1,		c ## D;\
+	add	k+4+round(%r11),%r8d;\
+	xor	%r8d,		d ## D
+
+/*
+ * a input register containing a
+ * b input register containing b (rotated 16)
+ * c input register containing c (already rol $1)
+ * d input register containing d
+ * operations on a and b are interleaved to increase performance
+ */
+#define decrypt_round(a,b,c,d,round)\
+	movzx	a ## B,		%edi;\
+	mov	(%r11,%rdi,4),	%r9d;\
+	movzx	b ## B,		%edi;\
+	mov	s3(%r11,%rdi,4),%r8d;\
+	movzx	a ## H,		%edi;\
+	ror	$16,		a ## D;\
+	xor	s1(%r11,%rdi,4),%r9d;\
+	movzx	b ## H,		%edi;\
+	ror	$16,		b ## D;\
+	xor	(%r11,%rdi,4),	%r8d;\
+	movzx	a ## B,		%edi;\
+	xor	s2(%r11,%rdi,4),%r9d;\
+	movzx	b ## B,		%edi;\
+	xor	s1(%r11,%rdi,4),%r8d;\
+	movzx	a ## H,		%edi;\
+	ror	$15,		a ## D;\
+	xor	s3(%r11,%rdi,4),%r9d;\
+	movzx	b ## H,		%edi;\
+	xor	s2(%r11,%rdi,4),%r8d;\
+	add	%r8d,		%r9d;\
+	add	%r9d,		%r8d;\
+	add	k+round(%r11),	%r9d;\
+	xor	%r9d,		c ## D;\
+	add	k+4+round(%r11),%r8d;\
+	xor	%r8d,		d ## D;\
+	rol	$15,		d ## D;
+
+/*
+ * a input register containing a
+ * b input register containing b
+ * c input register containing c (already rol $1)
+ * d input register containing d
+ * operations on a and b are interleaved to increase performance
+ * during the round a and b are prepared for the output whitening
+ */
+#define decrypt_last_round(a,b,c,d,round)\
+	movzx	a ## B,		%edi;\
+	mov	(%r11,%rdi,4),	%r9d;\
+	movzx	b ## B,		%edi;\
+	mov	s3(%r11,%rdi,4),%r8d;\
+	movzx	b ## H,		%edi;\
+	ror	$16,		b ## D;\
+	xor	(%r11,%rdi,4),	%r8d;\
+	movzx	a ## H,		%edi;\
+	mov	b ## D,		%r10d;\
+	shl	$32,		%r10;\
+	xor	a,		%r10;\
+	ror	$16,		a ## D;\
+	xor	s1(%r11,%rdi,4),%r9d;\
+	movzx	b ## B,		%edi;\
+	xor	s1(%r11,%rdi,4),%r8d;\
+	movzx	a ## B,		%edi;\
+	xor	s2(%r11,%rdi,4),%r9d;\
+	movzx	b ## H,		%edi;\
+	xor	s2(%r11,%rdi,4),%r8d;\
+	movzx	a ## H,		%edi;\
+	xor	s3(%r11,%rdi,4),%r9d;\
+	add	%r8d,		%r9d;\
+	add	%r9d,		%r8d;\
+	add	k+round(%r11),	%r9d;\
+	xor	%r9d,		c ## D;\
+	add	k+4+round(%r11),%r8d;\
+	xor	%r8d,		d ## D;\
+	ror	$1,		d ## D;
+
+.align 8
+.global twofish_enc_blk
+.global twofish_dec_blk
+
+twofish_enc_blk:
+	pushq	R1
+
+	/* %rdi contains the crypto tfm address */
+	/* %rsi contains the output address */
+	/* %rdx contains the input address */
+	add	$crypto_tfm_ctx_offset, %rdi	/* set ctx address */
+	/* ctx address is moved to free one non-rex register
+	   as target for the 8bit high operations */
+	mov	%rdi,	%r11
+
+	movq	(R3),	R1
+	movq	8(R3),	R3
+	input_whitening(R1,%r11,a_offset)
+	input_whitening(R3,%r11,c_offset)
+	mov	R1D,	R0D
+	rol	$16,	R0D
+	shr	$32,	R1
+	mov	R3D,	R2D
+	shr	$32,	R3
+	rol	$1,	R3D
+
+	encrypt_round(R0,R1,R2,R3,0);
+	encrypt_round(R2,R3,R0,R1,8);
+	encrypt_round(R0,R1,R2,R3,2*8);
+	encrypt_round(R2,R3,R0,R1,3*8);
+	encrypt_round(R0,R1,R2,R3,4*8);
+	encrypt_round(R2,R3,R0,R1,5*8);
+	encrypt_round(R0,R1,R2,R3,6*8);
+	encrypt_round(R2,R3,R0,R1,7*8);
+	encrypt_round(R0,R1,R2,R3,8*8);
+	encrypt_round(R2,R3,R0,R1,9*8);
+	encrypt_round(R0,R1,R2,R3,10*8);
+	encrypt_round(R2,R3,R0,R1,11*8);
+	encrypt_round(R0,R1,R2,R3,12*8);
+	encrypt_round(R2,R3,R0,R1,13*8);
+	encrypt_round(R0,R1,R2,R3,14*8);
+	encrypt_last_round(R2,R3,R0,R1,15*8);
+
+
+	output_whitening(%r10,%r11,a_offset)
+	movq	%r10,	(%rsi)
+
+	shl	$32,	R1
+	xor	R0,	R1
+
+	output_whitening(R1,%r11,c_offset)
+	movq	R1,	8(%rsi)
+
+	popq	R1
+	movq	$1,	%rax
+	ret
+
+twofish_dec_blk:
+	pushq	R1
+
+	/* %rdi contains the crypto tfm address */
+	/* %rsi contains the output address */
+	/* %rdx contains the input address */
+	add	$crypto_tfm_ctx_offset, %rdi	/* set ctx address */
+	/* ctx address is moved to free one non-rex register
+	   as target for the 8bit high operations */
+	mov	%rdi,	%r11
+
+	movq	(R3),	R1
+	movq	8(R3),	R3
+	output_whitening(R1,%r11,a_offset)
+	output_whitening(R3,%r11,c_offset)
+	mov	R1D,	R0D
+	shr	$32,	R1
+	rol	$16,	R1D
+	mov	R3D,	R2D
+	shr	$32,	R3
+	rol	$1,	R2D
+
+	decrypt_round(R0,R1,R2,R3,15*8);
+	decrypt_round(R2,R3,R0,R1,14*8);
+	decrypt_round(R0,R1,R2,R3,13*8);
+	decrypt_round(R2,R3,R0,R1,12*8);
+	decrypt_round(R0,R1,R2,R3,11*8);
+	decrypt_round(R2,R3,R0,R1,10*8);
+	decrypt_round(R0,R1,R2,R3,9*8);
+	decrypt_round(R2,R3,R0,R1,8*8);
+	decrypt_round(R0,R1,R2,R3,7*8);
+	decrypt_round(R2,R3,R0,R1,6*8);
+	decrypt_round(R0,R1,R2,R3,5*8);
+	decrypt_round(R2,R3,R0,R1,4*8);
+	decrypt_round(R0,R1,R2,R3,3*8);
+	decrypt_round(R2,R3,R0,R1,2*8);
+	decrypt_round(R0,R1,R2,R3,1*8);
+	decrypt_last_round(R2,R3,R0,R1,0);
+
+	input_whitening(%r10,%r11,a_offset)
+	movq	%r10,	(%rsi)
+
+	shl	$32,	R1
+	xor	R0,	R1
+
+	input_whitening(R1,%r11,c_offset)
+	movq	R1,	8(%rsi)
+
+	popq	R1
+	movq	$1,	%rax
+	ret
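The round macros keep a pre-rotated by 16 and d pre-rotated by 1 so the rotate counts above differ from the specification; stripped of that instruction scheduling, one encryption round is the textbook Twofish step. A rough C restatement for orientation only (not code from this patch), assuming the usual notation with expanded key words K[0..39] and the kernel's rol32/ror32 helpers:

#include <linux/bitops.h>
#include <linux/types.h>

/* One Twofish round: g() over the key-dependent S-boxes s0..s3,
 * the pseudo-Hadamard transform, round-key addition, then the
 * 1-bit rotations of the right half. */
static inline void twofish_enc_round(u32 a, u32 b, u32 *c, u32 *d,
				     const u32 *s0, const u32 *s1,
				     const u32 *s2, const u32 *s3,
				     const u32 *K, int r)
{
	u32 t0, t1;

	t0 = s0[a & 0xff] ^ s1[(a >> 8) & 0xff] ^	/* g(a) */
	     s2[(a >> 16) & 0xff] ^ s3[a >> 24];
	t1 = s1[b & 0xff] ^ s2[(b >> 8) & 0xff] ^	/* g(rol(b, 8)) */
	     s3[(b >> 16) & 0xff] ^ s0[b >> 24];

	t0 += t1;		/* pseudo-Hadamard transform */
	t1 += t0;

	*c = ror32(*c ^ (t0 + K[2 * r + 8]), 1);
	*d = rol32(*d, 1) ^ (t1 + K[2 * r + 9]);
}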
diff --git a/arch/x86_64/crypto/twofish.c b/arch/x86_64/crypto/twofish.c
new file mode 100644
index 000000000000..182d91d5cfb9
--- /dev/null
+++ b/arch/x86_64/crypto/twofish.c
@@ -0,0 +1,97 @@
+/*
+ * Glue Code for optimized x86_64 assembler version of TWOFISH
+ *
+ * Originally Twofish for GPG
+ * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
+ * 256-bit key length added March 20, 1999
+ * Some modifications to reduce the text size by Werner Koch, April, 1998
+ * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
+ * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
+ *
+ * The original author has disclaimed all copyright interest in this
+ * code and thus put it in the public domain. The subsequent authors
+ * have put this under the GNU General Public License.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ * This code is a "clean room" implementation, written from the paper
+ * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
+ * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
+ * through http://www.counterpane.com/twofish.html
+ *
+ * For background information on multiplication in finite fields, used for
+ * the matrix operations in the key schedule, see the book _Contemporary
+ * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
+ * Third Edition.
+ */
+
+#include <crypto/twofish.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+
+static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	twofish_enc_blk(tfm, dst, src);
+}
+
+static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	twofish_dec_blk(tfm, dst, src);
+}
+
+static struct crypto_alg alg = {
+	.cra_name		=	"twofish",
+	.cra_driver_name	=	"twofish-x86_64",
+	.cra_priority		=	200,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	TF_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct twofish_ctx),
+	.cra_alignmask		=	3,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
+			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
+			.cia_setkey		=	twofish_setkey,
+			.cia_encrypt		=	twofish_encrypt,
+			.cia_decrypt		=	twofish_decrypt
+		}
+	}
+};
+
+static int __init init(void)
+{
+	return crypto_register_alg(&alg);
+}
+
+static void __exit fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized");
+MODULE_ALIAS("twofish");
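cra_priority 200 together with the distinct cra_driver_name is what lets this module silently take over from twofish-generic. The selection rule, condensed from __crypto_alg_lookup() in crypto/api.c later in this patch (pick_alg is a hypothetical wrapper; crypto_alg_list comes from crypto/internal.h and locking is omitted):

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/string.h>

/* An exact cra_driver_name match wins immediately; otherwise the
 * highest-priority entry whose cra_name matches is kept, so asking
 * for "twofish" finds the asm driver ahead of the generic one. */
static struct crypto_alg *pick_alg(const char *name)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact = !strcmp(q->cra_driver_name, name);
		int fuzzy = !strcmp(q->cra_name, name);

		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		best = q->cra_priority;
		alg = q;
		if (exact)
			break;
	}
	return alg;
}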
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ba133d557045..1e2f39c21180 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -9,47 +9,71 @@ config CRYPTO
 	help
 	  This option provides the core Cryptographic API.
 
+if CRYPTO
+
+config CRYPTO_ALGAPI
+	tristate
+	help
+	  This option provides the API for cryptographic algorithms.
+
+config CRYPTO_BLKCIPHER
+	tristate
+	select CRYPTO_ALGAPI
+
+config CRYPTO_HASH
+	tristate
+	select CRYPTO_ALGAPI
+
+config CRYPTO_MANAGER
+	tristate "Cryptographic algorithm manager"
+	select CRYPTO_ALGAPI
+	default m
+	help
+	  Create default cryptographic template instantiations such as
+	  cbc(aes).
+
 config CRYPTO_HMAC
-	bool "HMAC support"
-	depends on CRYPTO
+	tristate "HMAC support"
+	select CRYPTO_HASH
 	help
 	  HMAC: Keyed-Hashing for Message Authentication (RFC2104).
 	  This is required for IPSec.
 
 config CRYPTO_NULL
 	tristate "Null algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
 
 config CRYPTO_MD4
 	tristate "MD4 digest algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  MD4 message digest algorithm (RFC1320).
 
 config CRYPTO_MD5
 	tristate "MD5 digest algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  MD5 message digest algorithm (RFC1321).
 
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
 config CRYPTO_SHA1_S390
 	tristate "SHA1 digest algorithm (s390)"
-	depends on CRYPTO && S390
+	depends on S390
+	select CRYPTO_ALGAPI
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
 config CRYPTO_SHA256
 	tristate "SHA256 digest algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  SHA256 secure hash standard (DFIPS 180-2).
 
@@ -58,7 +82,8 @@ config CRYPTO_SHA256
 
 config CRYPTO_SHA256_S390
 	tristate "SHA256 digest algorithm (s390)"
-	depends on CRYPTO && S390
+	depends on S390
+	select CRYPTO_ALGAPI
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  SHA256 secure hash standard (DFIPS 180-2).
@@ -68,7 +93,7 @@ config CRYPTO_SHA256_S390
 
 config CRYPTO_SHA512
 	tristate "SHA384 and SHA512 digest algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  SHA512 secure hash standard (DFIPS 180-2).
 
@@ -80,7 +105,7 @@ config CRYPTO_SHA512
 
 config CRYPTO_WP512
 	tristate "Whirlpool digest algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Whirlpool hash algorithm 512, 384 and 256-bit hashes
 
@@ -92,7 +117,7 @@ config CRYPTO_WP512
 
 config CRYPTO_TGR192
 	tristate "Tiger digest algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Tiger hash algorithm 192, 160 and 128-bit hashes
 
@@ -103,21 +128,40 @@ config CRYPTO_TGR192
 	  See also:
 	  <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
 
+config CRYPTO_ECB
+	tristate "ECB support"
+	select CRYPTO_BLKCIPHER
+	default m
+	help
+	  ECB: Electronic CodeBook mode
+	  This is the simplest block cipher algorithm.  It simply encrypts
+	  the input block by block.
+
+config CRYPTO_CBC
+	tristate "CBC support"
+	select CRYPTO_BLKCIPHER
+	default m
+	help
+	  CBC: Cipher Block Chaining mode
+	  This block cipher algorithm is required for IPSec.
+
 config CRYPTO_DES
 	tristate "DES and Triple DES EDE cipher algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
 config CRYPTO_DES_S390
 	tristate "DES and Triple DES cipher algorithms (s390)"
-	depends on CRYPTO && S390
+	depends on S390
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
 	help
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
 config CRYPTO_BLOWFISH
 	tristate "Blowfish cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Blowfish cipher algorithm, by Bruce Schneier.
 
@@ -130,7 +174,8 @@ config CRYPTO_BLOWFISH
 
 config CRYPTO_TWOFISH
 	tristate "Twofish cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
+	select CRYPTO_TWOFISH_COMMON
 	help
 	  Twofish cipher algorithm.
 
@@ -142,9 +187,47 @@ config CRYPTO_TWOFISH
 	  See also:
 	  <http://www.schneier.com/twofish.html>
 
+config CRYPTO_TWOFISH_COMMON
+	tristate
+	help
+	  Common parts of the Twofish cipher algorithm shared by the
+	  generic C and the assembler implementations.
+
+config CRYPTO_TWOFISH_586
+	tristate "Twofish cipher algorithms (i586)"
+	depends on (X86 || UML_X86) && !64BIT
+	select CRYPTO_ALGAPI
+	select CRYPTO_TWOFISH_COMMON
+	help
+	  Twofish cipher algorithm.
+
+	  Twofish was submitted as an AES (Advanced Encryption Standard)
+	  candidate cipher by researchers at CounterPane Systems.  It is a
+	  16 round block cipher supporting key sizes of 128, 192, and 256
+	  bits.
+
+	  See also:
+	  <http://www.schneier.com/twofish.html>
+
+config CRYPTO_TWOFISH_X86_64
+	tristate "Twofish cipher algorithm (x86_64)"
+	depends on (X86 || UML_X86) && 64BIT
+	select CRYPTO_ALGAPI
+	select CRYPTO_TWOFISH_COMMON
+	help
+	  Twofish cipher algorithm (x86_64).
+
+	  Twofish was submitted as an AES (Advanced Encryption Standard)
+	  candidate cipher by researchers at CounterPane Systems.  It is a
+	  16 round block cipher supporting key sizes of 128, 192, and 256
+	  bits.
+
+	  See also:
+	  <http://www.schneier.com/twofish.html>
+
 config CRYPTO_SERPENT
 	tristate "Serpent cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.
 
@@ -157,7 +240,7 @@ config CRYPTO_SERPENT
 
 config CRYPTO_AES
 	tristate "AES cipher algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
 	  algorithm.
@@ -177,7 +260,8 @@ config CRYPTO_AES
 
 config CRYPTO_AES_586
 	tristate "AES cipher algorithms (i586)"
-	depends on CRYPTO && ((X86 || UML_X86) && !64BIT)
+	depends on (X86 || UML_X86) && !64BIT
+	select CRYPTO_ALGAPI
 	help
 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
 	  algorithm.
@@ -197,7 +281,8 @@ config CRYPTO_AES_586
 
 config CRYPTO_AES_X86_64
 	tristate "AES cipher algorithms (x86_64)"
-	depends on CRYPTO && ((X86 || UML_X86) && 64BIT)
+	depends on (X86 || UML_X86) && 64BIT
+	select CRYPTO_ALGAPI
 	help
 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
 	  algorithm.
@@ -217,7 +302,9 @@ config CRYPTO_AES_X86_64
 
 config CRYPTO_AES_S390
 	tristate "AES cipher algorithms (s390)"
-	depends on CRYPTO && S390
+	depends on S390
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
@@ -237,21 +324,21 @@ config CRYPTO_AES_S390
 
 config CRYPTO_CAST5
 	tristate "CAST5 (CAST-128) cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  The CAST5 encryption algorithm (synonymous with CAST-128) is
 	  described in RFC2144.
 
 config CRYPTO_CAST6
 	tristate "CAST6 (CAST-256) cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  The CAST6 encryption algorithm (synonymous with CAST-256) is
 	  described in RFC2612.
 
 config CRYPTO_TEA
 	tristate "TEA, XTEA and XETA cipher algorithms"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  TEA cipher algorithm.
 
@@ -268,7 +355,7 @@ config CRYPTO_TEA
 
 config CRYPTO_ARC4
 	tristate "ARC4 cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  ARC4 cipher algorithm.
 
@@ -279,7 +366,7 @@ config CRYPTO_ARC4
 
 config CRYPTO_KHAZAD
 	tristate "Khazad cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Khazad cipher algorithm.
 
@@ -292,7 +379,7 @@ config CRYPTO_KHAZAD
 
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Anubis cipher algorithm.
 
@@ -307,7 +394,7 @@ config CRYPTO_ANUBIS
 
 config CRYPTO_DEFLATE
 	tristate "Deflate compression algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
 	help
@@ -318,7 +405,7 @@ config CRYPTO_DEFLATE
 
 config CRYPTO_MICHAEL_MIC
 	tristate "Michael MIC keyed digest algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	help
 	  Michael MIC is used for message integrity protection in TKIP
 	  (IEEE 802.11i). This algorithm is required for TKIP, but it
@@ -327,7 +414,7 @@ config CRYPTO_MICHAEL_MIC
 
 config CRYPTO_CRC32C
 	tristate "CRC32c CRC algorithm"
-	depends on CRYPTO
+	select CRYPTO_ALGAPI
 	select LIBCRC32C
 	help
 	  Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -337,10 +424,13 @@ config CRYPTO_CRC32C
 
 config CRYPTO_TEST
 	tristate "Testing module"
-	depends on CRYPTO && m
+	depends on m
+	select CRYPTO_ALGAPI
 	help
 	  Quick & dirty crypto test module.
 
 source "drivers/crypto/Kconfig"
-endmenu
 
+endif	# if CRYPTO
+
+endmenu
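The new hidden options (CRYPTO_ALGAPI, CRYPTO_BLKCIPHER, CRYPTO_HASH) are pulled in via select, so users never toggle them directly; CRYPTO_MANAGER is what services lookups for composite names at run time. A sketch using the crypto_has_* probes added by this series (the wrapper name is illustrative; the zero type/mask means "any implementation"):

#include <linux/crypto.h>

/* Nonzero when "cbc(aes)" exists or can be instantiated; the probe may
 * load modules and have cryptomgr build the cbc template around aes. */
static int have_cbc_aes(void)
{
	return crypto_has_alg("cbc(aes)", 0, 0);
}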
diff --git a/crypto/Makefile b/crypto/Makefile
index d287b9e60c47..72366208e291 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,11 +2,18 @@
 # Cryptographic API
 #
 
-proc-crypto-$(CONFIG_PROC_FS) = proc.o
+obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o
 
-obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o \
-			$(proc-crypto-y)
+crypto_algapi-$(CONFIG_PROC_FS) += proc.o
+crypto_algapi-objs := algapi.o $(crypto_algapi-y)
+obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
 
+obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
+
+crypto_hash-objs := hash.o
+obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
+
+obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -16,9 +23,12 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+obj-$(CONFIG_CRYPTO_ECB) += ecb.o
+obj-$(CONFIG_CRYPTO_CBC) += cbc.o
 obj-$(CONFIG_CRYPTO_DES) += des.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
+obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
 obj-$(CONFIG_CRYPTO_AES) += aes.o
 obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
diff --git a/crypto/aes.c b/crypto/aes.c
index a038711831e7..e2440773878c 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -249,13 +249,14 @@ gen_tabs (void)
 }
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	u32 i, t, u, v, w;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
+	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
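The same two-line conversion recurs across every setkey in this series: the u32 *flags parameter disappears and error bits go through tfm->crt_flags instead. As a minimal sketch of the resulting shape (example_ctx, the helper name, and the 16-byte check are placeholders, not kernel code):

#include <linux/crypto.h>
#include <linux/string.h>

struct example_ctx {			/* hypothetical context */
	u8 key[16];
};

static int example_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			  unsigned int key_len)
{
	struct example_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;	/* replaces the old argument */

	if (key_len != 16) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->key, in_key, key_len);
	return 0;
}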
diff --git a/crypto/algapi.c b/crypto/algapi.c
new file mode 100644
index 000000000000..c91530021e9c
--- /dev/null
+++ b/crypto/algapi.c
@@ -0,0 +1,486 @@
+/*
+ * Cryptographic API for algorithms (i.e., low-level API).
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/string.h>
+
+#include "internal.h"
+
+static LIST_HEAD(crypto_template_list);
+
+void crypto_larval_error(const char *name, u32 type, u32 mask)
+{
+	struct crypto_alg *alg;
+
+	down_read(&crypto_alg_sem);
+	alg = __crypto_alg_lookup(name, type, mask);
+	up_read(&crypto_alg_sem);
+
+	if (alg) {
+		if (crypto_is_larval(alg)) {
+			struct crypto_larval *larval = (void *)alg;
+			complete(&larval->completion);
+		}
+		crypto_mod_put(alg);
+	}
+}
+EXPORT_SYMBOL_GPL(crypto_larval_error);
+
+static inline int crypto_set_driver_name(struct crypto_alg *alg)
+{
+	static const char suffix[] = "-generic";
+	char *driver_name = alg->cra_driver_name;
+	int len;
+
+	if (*driver_name)
+		return 0;
+
+	len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+	if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
+
+	memcpy(driver_name + len, suffix, sizeof(suffix));
+	return 0;
+}
+
+static int crypto_check_alg(struct crypto_alg *alg)
+{
+	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
+		return -EINVAL;
+
+	if (alg->cra_alignmask & alg->cra_blocksize)
+		return -EINVAL;
+
+	if (alg->cra_blocksize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	if (alg->cra_priority < 0)
+		return -EINVAL;
+
+	return crypto_set_driver_name(alg);
+}
+
+static void crypto_destroy_instance(struct crypto_alg *alg)
+{
+	struct crypto_instance *inst = (void *)alg;
+	struct crypto_template *tmpl = inst->tmpl;
+
+	tmpl->free(inst);
+	crypto_tmpl_put(tmpl);
+}
+
+static void crypto_remove_spawns(struct list_head *spawns,
+				 struct list_head *list)
+{
+	struct crypto_spawn *spawn, *n;
+
+	list_for_each_entry_safe(spawn, n, spawns, list) {
+		struct crypto_instance *inst = spawn->inst;
+		struct crypto_template *tmpl = inst->tmpl;
+
+		list_del_init(&spawn->list);
+		spawn->alg = NULL;
+
+		if (crypto_is_dead(&inst->alg))
+			continue;
+
+		inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
+		if (!tmpl || !crypto_tmpl_get(tmpl))
+			continue;
+
+		crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
+		list_move(&inst->alg.cra_list, list);
+		hlist_del(&inst->list);
+		inst->alg.cra_destroy = crypto_destroy_instance;
+
+		if (!list_empty(&inst->alg.cra_users)) {
+			if (&n->list == spawns)
+				n = list_entry(inst->alg.cra_users.next,
+					       typeof(*n), list);
+			__list_splice(&inst->alg.cra_users, spawns->prev);
+		}
+	}
+}
+
+static int __crypto_register_alg(struct crypto_alg *alg,
+				 struct list_head *list)
+{
+	struct crypto_alg *q;
+	int ret = -EAGAIN;
+
+	if (crypto_is_dead(alg))
+		goto out;
+
+	INIT_LIST_HEAD(&alg->cra_users);
+
+	ret = -EEXIST;
+
+	atomic_set(&alg->cra_refcnt, 1);
+	list_for_each_entry(q, &crypto_alg_list, cra_list) {
+		if (q == alg)
+			goto out;
+
+		if (crypto_is_moribund(q))
+			continue;
+
+		if (crypto_is_larval(q)) {
+			struct crypto_larval *larval = (void *)q;
+
+			if (strcmp(alg->cra_name, q->cra_name) &&
+			    strcmp(alg->cra_driver_name, q->cra_name))
+				continue;
+
+			if (larval->adult)
+				continue;
+			if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
+				continue;
+			if (!crypto_mod_get(alg))
+				continue;
+
+			larval->adult = alg;
+			complete(&larval->completion);
+			continue;
+		}
+
+		if (strcmp(alg->cra_name, q->cra_name))
+			continue;
+
+		if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
+		    q->cra_priority > alg->cra_priority)
+			continue;
+
+		crypto_remove_spawns(&q->cra_users, list);
+	}
+
+	list_add(&alg->cra_list, &crypto_alg_list);
+
+	crypto_notify(CRYPTO_MSG_ALG_REGISTER, alg);
+	ret = 0;
+
+out:
+	return ret;
+}
+
+static void crypto_remove_final(struct list_head *list)
+{
+	struct crypto_alg *alg;
+	struct crypto_alg *n;
+
+	list_for_each_entry_safe(alg, n, list, cra_list) {
+		list_del_init(&alg->cra_list);
+		crypto_alg_put(alg);
+	}
+}
+
+int crypto_register_alg(struct crypto_alg *alg)
+{
+	LIST_HEAD(list);
+	int err;
+
+	err = crypto_check_alg(alg);
+	if (err)
+		return err;
+
+	down_write(&crypto_alg_sem);
+	err = __crypto_register_alg(alg, &list);
+	up_write(&crypto_alg_sem);
+
+	crypto_remove_final(&list);
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_register_alg);
+
+static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
+{
+	if (unlikely(list_empty(&alg->cra_list)))
+		return -ENOENT;
+
+	alg->cra_flags |= CRYPTO_ALG_DEAD;
+
+	crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
+	list_del_init(&alg->cra_list);
+	crypto_remove_spawns(&alg->cra_users, list);
+
+	return 0;
+}
+
+int crypto_unregister_alg(struct crypto_alg *alg)
+{
+	int ret;
+	LIST_HEAD(list);
+
+	down_write(&crypto_alg_sem);
+	ret = crypto_remove_alg(alg, &list);
+	up_write(&crypto_alg_sem);
+
+	if (ret)
+		return ret;
+
+	BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+	if (alg->cra_destroy)
+		alg->cra_destroy(alg);
+
+	crypto_remove_final(&list);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_alg);
+
+int crypto_register_template(struct crypto_template *tmpl)
+{
+	struct crypto_template *q;
+	int err = -EEXIST;
+
+	down_write(&crypto_alg_sem);
+
+	list_for_each_entry(q, &crypto_template_list, list) {
+		if (q == tmpl)
+			goto out;
+	}
+
+	list_add(&tmpl->list, &crypto_template_list);
+	crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
+	err = 0;
+out:
+	up_write(&crypto_alg_sem);
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_register_template);
+
+void crypto_unregister_template(struct crypto_template *tmpl)
+{
+	struct crypto_instance *inst;
+	struct hlist_node *p, *n;
+	struct hlist_head *list;
+	LIST_HEAD(users);
+
+	down_write(&crypto_alg_sem);
+
+	BUG_ON(list_empty(&tmpl->list));
+	list_del_init(&tmpl->list);
+
+	list = &tmpl->instances;
+	hlist_for_each_entry(inst, p, list, list) {
+		int err = crypto_remove_alg(&inst->alg, &users);
+		BUG_ON(err);
+	}
+
+	crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
+
+	up_write(&crypto_alg_sem);
+
+	hlist_for_each_entry_safe(inst, p, n, list, list) {
+		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
+		tmpl->free(inst);
+	}
+	crypto_remove_final(&users);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_template);
+
+static struct crypto_template *__crypto_lookup_template(const char *name)
+{
+	struct crypto_template *q, *tmpl = NULL;
+
+	down_read(&crypto_alg_sem);
+	list_for_each_entry(q, &crypto_template_list, list) {
+		if (strcmp(q->name, name))
+			continue;
+		if (unlikely(!crypto_tmpl_get(q)))
+			continue;
+
+		tmpl = q;
+		break;
+	}
+	up_read(&crypto_alg_sem);
+
+	return tmpl;
+}
+
+struct crypto_template *crypto_lookup_template(const char *name)
+{
+	return try_then_request_module(__crypto_lookup_template(name), name);
+}
+EXPORT_SYMBOL_GPL(crypto_lookup_template);
+
+int crypto_register_instance(struct crypto_template *tmpl,
+			     struct crypto_instance *inst)
+{
+	LIST_HEAD(list);
+	int err = -EINVAL;
+
+	if (inst->alg.cra_destroy)
+		goto err;
+
+	err = crypto_check_alg(&inst->alg);
+	if (err)
+		goto err;
+
+	inst->alg.cra_module = tmpl->module;
+
+	down_write(&crypto_alg_sem);
+
+	err = __crypto_register_alg(&inst->alg, &list);
+	if (err)
+		goto unlock;
+
+	hlist_add_head(&inst->list, &tmpl->instances);
+	inst->tmpl = tmpl;
+
+unlock:
+	up_write(&crypto_alg_sem);
+
+	crypto_remove_final(&list);
+
+err:
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_register_instance);
+
+int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
+		      struct crypto_instance *inst)
+{
+	int err = -EAGAIN;
+
+	spawn->inst = inst;
+
+	down_write(&crypto_alg_sem);
+	if (!crypto_is_moribund(alg)) {
+		list_add(&spawn->list, &alg->cra_users);
+		spawn->alg = alg;
+		err = 0;
+	}
+	up_write(&crypto_alg_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_init_spawn);
+
+void crypto_drop_spawn(struct crypto_spawn *spawn)
+{
+	down_write(&crypto_alg_sem);
+	list_del(&spawn->list);
+	up_write(&crypto_alg_sem);
+}
+EXPORT_SYMBOL_GPL(crypto_drop_spawn);
+
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
+{
+	struct crypto_alg *alg;
+	struct crypto_alg *alg2;
+	struct crypto_tfm *tfm;
+
+	down_read(&crypto_alg_sem);
+	alg = spawn->alg;
+	alg2 = alg;
+	if (alg2)
+		alg2 = crypto_mod_get(alg2);
+	up_read(&crypto_alg_sem);
+
+	if (!alg2) {
+		if (alg)
+			crypto_shoot_alg(alg);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	tfm = __crypto_alloc_tfm(alg, 0);
+	if (IS_ERR(tfm))
+		crypto_mod_put(alg);
+
+	return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
+
+int crypto_register_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&crypto_chain, nb);
+}
+EXPORT_SYMBOL_GPL(crypto_register_notifier);
+
+int crypto_unregister_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&crypto_chain, nb);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+
+struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
+				       u32 type, u32 mask)
+{
+	struct rtattr *rta = param;
+	struct crypto_attr_alg *alga;
+
+	if (!RTA_OK(rta, len))
+		return ERR_PTR(-EBADR);
+	if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga))
+		return ERR_PTR(-EINVAL);
+
+	alga = RTA_DATA(rta);
+	alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;
+
+	return crypto_alg_mod_lookup(alga->name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_get_attr_alg);
+
+struct crypto_instance *crypto_alloc_instance(const char *name,
+					      struct crypto_alg *alg)
+{
+	struct crypto_instance *inst;
+	struct crypto_spawn *spawn;
+	int err;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return ERR_PTR(-ENOMEM);
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
+		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+		goto err_free_inst;
+
+	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
+		     name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		goto err_free_inst;
+
+	spawn = crypto_instance_ctx(inst);
+	err = crypto_init_spawn(spawn, alg, inst);
+
+	if (err)
+		goto err_free_inst;
+
+	return inst;
+
+err_free_inst:
+	kfree(inst);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_instance);
+
+static int __init crypto_algapi_init(void)
+{
+	crypto_init_proc();
+	return 0;
+}
+
+static void __exit crypto_algapi_exit(void)
+{
+	crypto_exit_proc();
+}
+
+module_init(crypto_algapi_init);
+module_exit(crypto_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cryptographic algorithms API");
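The notifier chain exported above is the glue between the two halves of the new API: cryptomgr registers a block like the one below and reacts to CRYPTO_MSG_ALG_REQUEST by instantiating templates. A sketch of a listener, assuming kernel-internal context (the CRYPTO_MSG_* constants and notifier prototypes live in crypto/internal.h; the demo names are illustrative):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include "internal.h"

static int demo_crypto_event(struct notifier_block *nb, unsigned long msg,
			     void *data)
{
	if (msg == CRYPTO_MSG_ALG_REGISTER) {
		struct crypto_alg *alg = data;

		printk(KERN_INFO "crypto: registered %s\n",
		       alg->cra_driver_name);
	}
	return NOTIFY_DONE;	/* let other listeners see the event too */
}

static struct notifier_block demo_crypto_notifier = {
	.notifier_call = demo_crypto_event,
};

/* Hook up with crypto_register_notifier(&demo_crypto_notifier) and
 * remove with crypto_unregister_notifier() on module exit. */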
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 7e2e1a29800e..1c771f7f4dc5 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -461,10 +461,11 @@ static const u32 rc[] = {
 };
 
 static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
-			 unsigned int key_len, u32 *flags)
+			 unsigned int key_len)
 {
 	struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
 	const __be32 *key = (const __be32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	int N, R, i, r;
 	u32 kappa[ANUBIS_MAX_N];
 	u32 inter[ANUBIS_MAX_N];
diff --git a/crypto/api.c b/crypto/api.c
index c11ec1fd4f18..2e84d4b54790 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -15,70 +15,202 @@
15 * 15 *
16 */ 16 */
17 17
18#include <linux/compiler.h> 18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/crypto.h>
21#include <linux/errno.h> 19#include <linux/errno.h>
22#include <linux/kernel.h> 20#include <linux/kernel.h>
23#include <linux/kmod.h> 21#include <linux/kmod.h>
24#include <linux/rwsem.h> 22#include <linux/module.h>
23#include <linux/param.h>
24#include <linux/sched.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include "internal.h" 27#include "internal.h"
28 28
29LIST_HEAD(crypto_alg_list); 29LIST_HEAD(crypto_alg_list);
30EXPORT_SYMBOL_GPL(crypto_alg_list);
30DECLARE_RWSEM(crypto_alg_sem); 31DECLARE_RWSEM(crypto_alg_sem);
32EXPORT_SYMBOL_GPL(crypto_alg_sem);
31 33
32static inline int crypto_alg_get(struct crypto_alg *alg) 34BLOCKING_NOTIFIER_HEAD(crypto_chain);
35EXPORT_SYMBOL_GPL(crypto_chain);
36
37static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
38{
39 atomic_inc(&alg->cra_refcnt);
40 return alg;
41}
42
43struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
33{ 44{
34 return try_module_get(alg->cra_module); 45 return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
35} 46}
47EXPORT_SYMBOL_GPL(crypto_mod_get);
36 48
37static inline void crypto_alg_put(struct crypto_alg *alg) 49void crypto_mod_put(struct crypto_alg *alg)
38{ 50{
51 crypto_alg_put(alg);
39 module_put(alg->cra_module); 52 module_put(alg->cra_module);
40} 53}
54EXPORT_SYMBOL_GPL(crypto_mod_put);
41 55
42static struct crypto_alg *crypto_alg_lookup(const char *name) 56struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask)
43{ 57{
44 struct crypto_alg *q, *alg = NULL; 58 struct crypto_alg *q, *alg = NULL;
45 int best = -1; 59 int best = -2;
46 60
47 if (!name)
48 return NULL;
49
50 down_read(&crypto_alg_sem);
51
52 list_for_each_entry(q, &crypto_alg_list, cra_list) { 61 list_for_each_entry(q, &crypto_alg_list, cra_list) {
53 int exact, fuzzy; 62 int exact, fuzzy;
54 63
64 if (crypto_is_moribund(q))
65 continue;
66
67 if ((q->cra_flags ^ type) & mask)
68 continue;
69
70 if (crypto_is_larval(q) &&
71 ((struct crypto_larval *)q)->mask != mask)
72 continue;
73
55 exact = !strcmp(q->cra_driver_name, name); 74 exact = !strcmp(q->cra_driver_name, name);
56 fuzzy = !strcmp(q->cra_name, name); 75 fuzzy = !strcmp(q->cra_name, name);
57 if (!exact && !(fuzzy && q->cra_priority > best)) 76 if (!exact && !(fuzzy && q->cra_priority > best))
58 continue; 77 continue;
59 78
60 if (unlikely(!crypto_alg_get(q))) 79 if (unlikely(!crypto_mod_get(q)))
61 continue; 80 continue;
62 81
63 best = q->cra_priority; 82 best = q->cra_priority;
64 if (alg) 83 if (alg)
65 crypto_alg_put(alg); 84 crypto_mod_put(alg);
66 alg = q; 85 alg = q;
67 86
68 if (exact) 87 if (exact)
69 break; 88 break;
70 } 89 }
71 90
91 return alg;
92}
93EXPORT_SYMBOL_GPL(__crypto_alg_lookup);
94
95static void crypto_larval_destroy(struct crypto_alg *alg)
96{
97 struct crypto_larval *larval = (void *)alg;
98
99 BUG_ON(!crypto_is_larval(alg));
100 if (larval->adult)
101 crypto_mod_put(larval->adult);
102 kfree(larval);
103}
104
105static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
106 u32 mask)
107{
108 struct crypto_alg *alg;
109 struct crypto_larval *larval;
110
111 larval = kzalloc(sizeof(*larval), GFP_KERNEL);
112 if (!larval)
113 return ERR_PTR(-ENOMEM);
114
115 larval->mask = mask;
116 larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
117 larval->alg.cra_priority = -1;
118 larval->alg.cra_destroy = crypto_larval_destroy;
119
120 atomic_set(&larval->alg.cra_refcnt, 2);
121 strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
122 init_completion(&larval->completion);
123
124 down_write(&crypto_alg_sem);
125 alg = __crypto_alg_lookup(name, type, mask);
126 if (!alg) {
127 alg = &larval->alg;
128 list_add(&alg->cra_list, &crypto_alg_list);
129 }
130 up_write(&crypto_alg_sem);
131
132 if (alg != &larval->alg)
133 kfree(larval);
134
135 return alg;
136}
137
138static void crypto_larval_kill(struct crypto_alg *alg)
139{
140 struct crypto_larval *larval = (void *)alg;
141
142 down_write(&crypto_alg_sem);
143 list_del(&alg->cra_list);
144 up_write(&crypto_alg_sem);
145 complete(&larval->completion);
146 crypto_alg_put(alg);
147}
148
149static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
150{
151 struct crypto_larval *larval = (void *)alg;
152
153 wait_for_completion_interruptible_timeout(&larval->completion, 60 * HZ);
154 alg = larval->adult;
155 if (alg) {
156 if (!crypto_mod_get(alg))
157 alg = ERR_PTR(-EAGAIN);
158 } else
159 alg = ERR_PTR(-ENOENT);
160 crypto_mod_put(&larval->alg);
161
162 return alg;
163}
164
165static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
166 u32 mask)
167{
168 struct crypto_alg *alg;
169
170 down_read(&crypto_alg_sem);
171 alg = __crypto_alg_lookup(name, type, mask);
72 up_read(&crypto_alg_sem); 172 up_read(&crypto_alg_sem);
173
73 return alg; 174 return alg;
74} 175}
75 176
76/* A far more intelligent version of this is planned. For now, just 177struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
77 * try an exact match on the name of the algorithm. */
78static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
79{ 178{
80 return try_then_request_module(crypto_alg_lookup(name), name); 179 struct crypto_alg *alg;
180 struct crypto_alg *larval;
181 int ok;
182
183 if (!name)
184 return ERR_PTR(-ENOENT);
185
186 mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
187 type &= mask;
188
189 alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
190 name);
191 if (alg)
192 return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
193
194 larval = crypto_larval_alloc(name, type, mask);
195 if (IS_ERR(larval) || !crypto_is_larval(larval))
196 return larval;
197
198 ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
199 if (ok == NOTIFY_DONE) {
200 request_module("cryptomgr");
201 ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
202 }
203
204 if (ok == NOTIFY_STOP)
205 alg = crypto_larval_wait(larval);
206 else {
207 crypto_mod_put(larval);
208 alg = ERR_PTR(-ENOENT);
209 }
210 crypto_larval_kill(larval);
211 return alg;
81} 212}
213EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
82 214
83static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags) 215static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
84{ 216{
@@ -94,17 +226,18 @@ static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
94 226
95 case CRYPTO_ALG_TYPE_COMPRESS: 227 case CRYPTO_ALG_TYPE_COMPRESS:
96 return crypto_init_compress_flags(tfm, flags); 228 return crypto_init_compress_flags(tfm, flags);
97
98 default:
99 break;
100 } 229 }
101 230
102 BUG(); 231 return 0;
103 return -EINVAL;
104} 232}
105 233
106static int crypto_init_ops(struct crypto_tfm *tfm) 234static int crypto_init_ops(struct crypto_tfm *tfm)
107{ 235{
236 const struct crypto_type *type = tfm->__crt_alg->cra_type;
237
238 if (type)
239 return type->init(tfm);
240
108 switch (crypto_tfm_alg_type(tfm)) { 241 switch (crypto_tfm_alg_type(tfm)) {
109 case CRYPTO_ALG_TYPE_CIPHER: 242 case CRYPTO_ALG_TYPE_CIPHER:
110 return crypto_init_cipher_ops(tfm); 243 return crypto_init_cipher_ops(tfm);
@@ -125,6 +258,14 @@ static int crypto_init_ops(struct crypto_tfm *tfm)
125 258
126static void crypto_exit_ops(struct crypto_tfm *tfm) 259static void crypto_exit_ops(struct crypto_tfm *tfm)
127{ 260{
261 const struct crypto_type *type = tfm->__crt_alg->cra_type;
262
263 if (type) {
264 if (type->exit)
265 type->exit(tfm);
266 return;
267 }
268
128 switch (crypto_tfm_alg_type(tfm)) { 269 switch (crypto_tfm_alg_type(tfm)) {
129 case CRYPTO_ALG_TYPE_CIPHER: 270 case CRYPTO_ALG_TYPE_CIPHER:
130 crypto_exit_cipher_ops(tfm); 271 crypto_exit_cipher_ops(tfm);
@@ -146,53 +287,67 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
146 287
147static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags) 288static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags)
148{ 289{
290 const struct crypto_type *type = alg->cra_type;
149 unsigned int len; 291 unsigned int len;
150 292
293 len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
294 if (type)
295 return len + type->ctxsize(alg);
296
151 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 297 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
152 default: 298 default:
153 BUG(); 299 BUG();
154 300
155 case CRYPTO_ALG_TYPE_CIPHER: 301 case CRYPTO_ALG_TYPE_CIPHER:
156 len = crypto_cipher_ctxsize(alg, flags); 302 len += crypto_cipher_ctxsize(alg, flags);
157 break; 303 break;
158 304
159 case CRYPTO_ALG_TYPE_DIGEST: 305 case CRYPTO_ALG_TYPE_DIGEST:
160 len = crypto_digest_ctxsize(alg, flags); 306 len += crypto_digest_ctxsize(alg, flags);
161 break; 307 break;
162 308
163 case CRYPTO_ALG_TYPE_COMPRESS: 309 case CRYPTO_ALG_TYPE_COMPRESS:
164 len = crypto_compress_ctxsize(alg, flags); 310 len += crypto_compress_ctxsize(alg, flags);
165 break; 311 break;
166 } 312 }
167 313
168 return len + (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1)); 314 return len;
169} 315}
170 316
171struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) 317void crypto_shoot_alg(struct crypto_alg *alg)
318{
319 down_write(&crypto_alg_sem);
320 alg->cra_flags |= CRYPTO_ALG_DYING;
321 up_write(&crypto_alg_sem);
322}
323EXPORT_SYMBOL_GPL(crypto_shoot_alg);
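A hedged sketch of the contract crypto_shoot_alg() serves: when an algorithm's cra_init() returns -EAGAIN, __crypto_alloc_tfm() below marks the algorithm dying, and the retry loops in crypto_alloc_tfm()/crypto_alloc_base() look the name up again. example_cra_init and the per-instance flag are hypothetical.

        struct example_ctx {
                int resources_gone;     /* hypothetical per-instance state */
        };

        static int example_cra_init(struct crypto_tfm *tfm)
        {
                struct example_ctx *ctx = crypto_tfm_ctx(tfm);

                /* Returning -EAGAIN makes __crypto_alloc_tfm() shoot the
                 * algorithm; a retrying caller may then be handed a
                 * freshly constructed instance. */
                if (ctx->resources_gone)
                        return -EAGAIN;

                return 0;
        }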
324
325struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags)
172{ 326{
173 struct crypto_tfm *tfm = NULL; 327 struct crypto_tfm *tfm = NULL;
174 struct crypto_alg *alg;
175 unsigned int tfm_size; 328 unsigned int tfm_size;
176 329 int err = -ENOMEM;
177 alg = crypto_alg_mod_lookup(name);
178 if (alg == NULL)
179 goto out;
180 330
181 tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags); 331 tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
182 tfm = kzalloc(tfm_size, GFP_KERNEL); 332 tfm = kzalloc(tfm_size, GFP_KERNEL);
183 if (tfm == NULL) 333 if (tfm == NULL)
184 goto out_put; 334 goto out;
185 335
186 tfm->__crt_alg = alg; 336 tfm->__crt_alg = alg;
187 337
188 if (crypto_init_flags(tfm, flags)) 338 err = crypto_init_flags(tfm, flags);
339 if (err)
189 goto out_free_tfm; 340 goto out_free_tfm;
190 341
191 if (crypto_init_ops(tfm)) 342 err = crypto_init_ops(tfm);
343 if (err)
192 goto out_free_tfm; 344 goto out_free_tfm;
193 345
194 if (alg->cra_init && alg->cra_init(tfm)) 346 if (alg->cra_init && (err = alg->cra_init(tfm))) {
347 if (err == -EAGAIN)
348 crypto_shoot_alg(alg);
195 goto cra_init_failed; 349 goto cra_init_failed;
350 }
196 351
197 goto out; 352 goto out;
198 353
@@ -200,13 +355,97 @@ cra_init_failed:
200 crypto_exit_ops(tfm); 355 crypto_exit_ops(tfm);
201out_free_tfm: 356out_free_tfm:
202 kfree(tfm); 357 kfree(tfm);
203 tfm = NULL; 358 tfm = ERR_PTR(err);
204out_put:
205 crypto_alg_put(alg);
206out: 359out:
207 return tfm; 360 return tfm;
208} 361}
362EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
363
364struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
365{
366 struct crypto_tfm *tfm = NULL;
367 int err;
368
369 do {
370 struct crypto_alg *alg;
371
372 alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
373 err = PTR_ERR(alg);
374 if (IS_ERR(alg))
375 continue;
376
377 tfm = __crypto_alloc_tfm(alg, flags);
378 err = 0;
379 if (IS_ERR(tfm)) {
380 crypto_mod_put(alg);
381 err = PTR_ERR(tfm);
382 tfm = NULL;
383 }
384 } while (err == -EAGAIN && !signal_pending(current));
385
386 return tfm;
387}
388
389/*
390 * crypto_alloc_base - Locate algorithm and allocate transform
391 * @alg_name: Name of algorithm
392 * @type: Type of algorithm
393 * @mask: Mask for type comparison
394 *
395 * crypto_alloc_base() will first attempt to locate an already loaded
396 * algorithm. If that fails and the kernel supports dynamically loadable
397 * modules, it will then attempt to load a module of the same name or
398 * alias. If that fails it will send a query to any loaded crypto manager
399 * to construct an algorithm on the fly. A refcount is grabbed on the
400 * algorithm which is then associated with the new transform.
401 *
402 * The returned transform is of an indeterminate type. Most people
403 * should use one of the more specific allocation functions such as
404 * crypto_alloc_blkcipher.
405 *
406 * In case of error the return value is an error pointer.
407 */
408struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
409{
410 struct crypto_tfm *tfm;
411 int err;
412
413 for (;;) {
414 struct crypto_alg *alg;
415
416 alg = crypto_alg_mod_lookup(alg_name, type, mask);
417 err = PTR_ERR(alg);
418 tfm = ERR_PTR(err);
419 if (IS_ERR(alg))
420 goto err;
421
422 tfm = __crypto_alloc_tfm(alg, 0);
423 if (!IS_ERR(tfm))
424 break;
425
426 crypto_mod_put(alg);
427 err = PTR_ERR(tfm);
209 428
429err:
430 if (err != -EAGAIN)
431 break;
432 if (signal_pending(current)) {
433 err = -EINTR;
434 break;
435 }
436 };
437
438 return tfm;
439}
440EXPORT_SYMBOL_GPL(crypto_alloc_base);
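A minimal usage fragment for crypto_alloc_base(), assuming a name like "cbc(aes)" that the cryptomgr further down can instantiate; the type/mask values mirror those used elsewhere in this series.

        struct crypto_tfm *tfm;

        tfm = crypto_alloc_base("cbc(aes)", CRYPTO_ALG_TYPE_BLKCIPHER,
                                CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);    /* -ENOENT, -EINTR, -ENOMEM, ... */

        /* ... cast to the expected transform type and use it ... */

        crypto_free_tfm(tfm);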
441
442/*
443 * crypto_free_tfm - Free crypto transform
444 * @tfm: Transform to free
445 *
446 * crypto_free_tfm() frees up the transform and any associated resources,
447 * then drops the refcount on the associated algorithm.
448 */
210void crypto_free_tfm(struct crypto_tfm *tfm) 449void crypto_free_tfm(struct crypto_tfm *tfm)
211{ 450{
212 struct crypto_alg *alg; 451 struct crypto_alg *alg;
@@ -221,108 +460,39 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
221 if (alg->cra_exit) 460 if (alg->cra_exit)
222 alg->cra_exit(tfm); 461 alg->cra_exit(tfm);
223 crypto_exit_ops(tfm); 462 crypto_exit_ops(tfm);
224 crypto_alg_put(alg); 463 crypto_mod_put(alg);
225 memset(tfm, 0, size); 464 memset(tfm, 0, size);
226 kfree(tfm); 465 kfree(tfm);
227} 466}
228 467
229static inline int crypto_set_driver_name(struct crypto_alg *alg) 468int crypto_alg_available(const char *name, u32 flags)
230{
231 static const char suffix[] = "-generic";
232 char *driver_name = alg->cra_driver_name;
233 int len;
234
235 if (*driver_name)
236 return 0;
237
238 len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
239 if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
240 return -ENAMETOOLONG;
241
242 memcpy(driver_name + len, suffix, sizeof(suffix));
243 return 0;
244}
245
246int crypto_register_alg(struct crypto_alg *alg)
247{ 469{
248 int ret; 470 int ret = 0;
249 struct crypto_alg *q; 471 struct crypto_alg *alg = crypto_alg_mod_lookup(name, 0,
250 472 CRYPTO_ALG_ASYNC);
251 if (alg->cra_alignmask & (alg->cra_alignmask + 1))
252 return -EINVAL;
253
254 if (alg->cra_alignmask & alg->cra_blocksize)
255 return -EINVAL;
256
257 if (alg->cra_blocksize > PAGE_SIZE / 8)
258 return -EINVAL;
259
260 if (alg->cra_priority < 0)
261 return -EINVAL;
262
263 ret = crypto_set_driver_name(alg);
264 if (unlikely(ret))
265 return ret;
266
267 down_write(&crypto_alg_sem);
268 473
269 list_for_each_entry(q, &crypto_alg_list, cra_list) { 474 if (!IS_ERR(alg)) {
270 if (q == alg) { 475 crypto_mod_put(alg);
271 ret = -EEXIST; 476 ret = 1;
272 goto out;
273 }
274 } 477 }
275 478
276 list_add(&alg->cra_list, &crypto_alg_list);
277out:
278 up_write(&crypto_alg_sem);
279 return ret; 479 return ret;
280} 480}
281 481
282int crypto_unregister_alg(struct crypto_alg *alg) 482EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
283{ 483EXPORT_SYMBOL_GPL(crypto_free_tfm);
284 int ret = -ENOENT; 484EXPORT_SYMBOL_GPL(crypto_alg_available);
285 struct crypto_alg *q;
286
287 BUG_ON(!alg->cra_module);
288
289 down_write(&crypto_alg_sem);
290 list_for_each_entry(q, &crypto_alg_list, cra_list) {
291 if (alg == q) {
292 list_del(&alg->cra_list);
293 ret = 0;
294 goto out;
295 }
296 }
297out:
298 up_write(&crypto_alg_sem);
299 return ret;
300}
301 485
302int crypto_alg_available(const char *name, u32 flags) 486int crypto_has_alg(const char *name, u32 type, u32 mask)
303{ 487{
304 int ret = 0; 488 int ret = 0;
305 struct crypto_alg *alg = crypto_alg_mod_lookup(name); 489 struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
306 490
307 if (alg) { 491 if (!IS_ERR(alg)) {
308 crypto_alg_put(alg); 492 crypto_mod_put(alg);
309 ret = 1; 493 ret = 1;
310 } 494 }
311 495
312 return ret; 496 return ret;
313} 497}
314 498EXPORT_SYMBOL_GPL(crypto_has_alg);
315static int __init init_crypto(void)
316{
317 printk(KERN_INFO "Initializing Cryptographic API\n");
318 crypto_init_proc();
319 return 0;
320}
321
322__initcall(init_crypto);
323
324EXPORT_SYMBOL_GPL(crypto_register_alg);
325EXPORT_SYMBOL_GPL(crypto_unregister_alg);
326EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
327EXPORT_SYMBOL_GPL(crypto_free_tfm);
328EXPORT_SYMBOL_GPL(crypto_alg_available);
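crypto_alg_available() is kept for compatibility, but new code is expected to use crypto_has_alg(), which takes the same type/mask pair as the lookup functions. A minimal hedged probe ("hmac(sha1)" is an assumed example name):

        /* Pulls in modules or templates as needed; the reference taken
         * by the lookup is dropped before returning. */
        if (!crypto_has_alg("hmac(sha1)", 0, CRYPTO_ALG_ASYNC))
                return -ENOENT; /* neither loadable nor constructible */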
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 5edc6a65b987..8be47e13a9e3 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -25,7 +25,7 @@ struct arc4_ctx {
25}; 25};
26 26
27static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, 27static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
28 unsigned int key_len, u32 *flags) 28 unsigned int key_len)
29{ 29{
30 struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); 30 struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
31 int i, j = 0, k = 0; 31 int i, j = 0, k = 0;
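The arc4 hunk above is one instance of a tree-wide signature change: cia_setkey() loses its u32 *flags argument, and ciphers that need to report errors now set bits in tfm->crt_flags directly (see the des and cast6 hunks below). A hedged sketch of the new pattern, with hypothetical names and key length:

        static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
                                  unsigned int keylen)
        {
                u32 *flags = &tfm->crt_flags;

                if (keylen != 16) {     /* hypothetical requirement */
                        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                        return -EINVAL;
                }

                /* ... key schedule ... */
                return 0;
        }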
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
new file mode 100644
index 000000000000..034c939bf91a
--- /dev/null
+++ b/crypto/blkcipher.c
@@ -0,0 +1,405 @@
1/*
2 * Block chaining cipher operations.
3 *
4 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
5 * multiple page boundaries by using temporary blocks. In user context,
6 * the kernel is given a chance to schedule us once per page.
7 *
8 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16
17#include <linux/crypto.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/io.h>
21#include <linux/module.h>
22#include <linux/scatterlist.h>
23#include <linux/seq_file.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26
27#include "internal.h"
28#include "scatterwalk.h"
29
30enum {
31 BLKCIPHER_WALK_PHYS = 1 << 0,
32 BLKCIPHER_WALK_SLOW = 1 << 1,
33 BLKCIPHER_WALK_COPY = 1 << 2,
34 BLKCIPHER_WALK_DIFF = 1 << 3,
35};
36
37static int blkcipher_walk_next(struct blkcipher_desc *desc,
38 struct blkcipher_walk *walk);
39static int blkcipher_walk_first(struct blkcipher_desc *desc,
40 struct blkcipher_walk *walk);
41
42static inline void blkcipher_map_src(struct blkcipher_walk *walk)
43{
44 walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
45}
46
47static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
48{
49 walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
50}
51
52static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
53{
54 scatterwalk_unmap(walk->src.virt.addr, 0);
55}
56
57static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
58{
59 scatterwalk_unmap(walk->dst.virt.addr, 1);
60}
61
62static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
63{
64 if (offset_in_page(start + len) < len)
65 return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
66 return start;
67}
68
69static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
70 struct blkcipher_walk *walk,
71 unsigned int bsize)
72{
73 u8 *addr;
74 unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
75
76 addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
77 addr = blkcipher_get_spot(addr, bsize);
78 scatterwalk_copychunks(addr, &walk->out, bsize, 1);
79 return bsize;
80}
81
82static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
83 unsigned int n)
84{
85 n = walk->nbytes - n;
86
87 if (walk->flags & BLKCIPHER_WALK_COPY) {
88 blkcipher_map_dst(walk);
89 memcpy(walk->dst.virt.addr, walk->page, n);
90 blkcipher_unmap_dst(walk);
91 } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
92 blkcipher_unmap_src(walk);
93 if (walk->flags & BLKCIPHER_WALK_DIFF)
94 blkcipher_unmap_dst(walk);
95 }
96
97 scatterwalk_advance(&walk->in, n);
98 scatterwalk_advance(&walk->out, n);
99
100 return n;
101}
102
103int blkcipher_walk_done(struct blkcipher_desc *desc,
104 struct blkcipher_walk *walk, int err)
105{
106 struct crypto_blkcipher *tfm = desc->tfm;
107 unsigned int nbytes = 0;
108
109 if (likely(err >= 0)) {
110 unsigned int bsize = crypto_blkcipher_blocksize(tfm);
111 unsigned int n;
112
113 if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
114 n = blkcipher_done_fast(walk, err);
115 else
116 n = blkcipher_done_slow(tfm, walk, bsize);
117
118 nbytes = walk->total - n;
119 err = 0;
120 }
121
122 scatterwalk_done(&walk->in, 0, nbytes);
123 scatterwalk_done(&walk->out, 1, nbytes);
124
125 walk->total = nbytes;
126 walk->nbytes = nbytes;
127
128 if (nbytes) {
129 crypto_yield(desc->flags);
130 return blkcipher_walk_next(desc, walk);
131 }
132
133 if (walk->iv != desc->info)
134 memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
135 if (walk->buffer != walk->page)
136 kfree(walk->buffer);
137 if (walk->page)
138 free_page((unsigned long)walk->page);
139
140 return err;
141}
142EXPORT_SYMBOL_GPL(blkcipher_walk_done);
143
144static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
145 struct blkcipher_walk *walk,
146 unsigned int bsize,
147 unsigned int alignmask)
148{
149 unsigned int n;
150
151 if (walk->buffer)
152 goto ok;
153
154 walk->buffer = walk->page;
155 if (walk->buffer)
156 goto ok;
157
158 n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
159 walk->buffer = kmalloc(n, GFP_ATOMIC);
160 if (!walk->buffer)
161 return blkcipher_walk_done(desc, walk, -ENOMEM);
162
163ok:
164 walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
165 alignmask + 1);
166 walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
167 walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
168 bsize);
169
170 scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
171
172 walk->nbytes = bsize;
173 walk->flags |= BLKCIPHER_WALK_SLOW;
174
175 return 0;
176}
177
178static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
179{
180 u8 *tmp = walk->page;
181
182 blkcipher_map_src(walk);
183 memcpy(tmp, walk->src.virt.addr, walk->nbytes);
184 blkcipher_unmap_src(walk);
185
186 walk->src.virt.addr = tmp;
187 walk->dst.virt.addr = tmp;
188
189 return 0;
190}
191
192static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
193 struct blkcipher_walk *walk)
194{
195 unsigned long diff;
196
197 walk->src.phys.page = scatterwalk_page(&walk->in);
198 walk->src.phys.offset = offset_in_page(walk->in.offset);
199 walk->dst.phys.page = scatterwalk_page(&walk->out);
200 walk->dst.phys.offset = offset_in_page(walk->out.offset);
201
202 if (walk->flags & BLKCIPHER_WALK_PHYS)
203 return 0;
204
205 diff = walk->src.phys.offset - walk->dst.phys.offset;
206 diff |= walk->src.virt.page - walk->dst.virt.page;
207
208 blkcipher_map_src(walk);
209 walk->dst.virt.addr = walk->src.virt.addr;
210
211 if (diff) {
212 walk->flags |= BLKCIPHER_WALK_DIFF;
213 blkcipher_map_dst(walk);
214 }
215
216 return 0;
217}
218
219static int blkcipher_walk_next(struct blkcipher_desc *desc,
220 struct blkcipher_walk *walk)
221{
222 struct crypto_blkcipher *tfm = desc->tfm;
223 unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
224 unsigned int bsize = crypto_blkcipher_blocksize(tfm);
225 unsigned int n;
226 int err;
227
228 n = walk->total;
229 if (unlikely(n < bsize)) {
230 desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
231 return blkcipher_walk_done(desc, walk, -EINVAL);
232 }
233
234 walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
235 BLKCIPHER_WALK_DIFF);
236 if (!scatterwalk_aligned(&walk->in, alignmask) ||
237 !scatterwalk_aligned(&walk->out, alignmask)) {
238 walk->flags |= BLKCIPHER_WALK_COPY;
239 if (!walk->page) {
240 walk->page = (void *)__get_free_page(GFP_ATOMIC);
241 if (!walk->page)
242 n = 0;
243 }
244 }
245
246 n = scatterwalk_clamp(&walk->in, n);
247 n = scatterwalk_clamp(&walk->out, n);
248
249 if (unlikely(n < bsize)) {
250 err = blkcipher_next_slow(desc, walk, bsize, alignmask);
251 goto set_phys_lowmem;
252 }
253
254 walk->nbytes = n;
255 if (walk->flags & BLKCIPHER_WALK_COPY) {
256 err = blkcipher_next_copy(walk);
257 goto set_phys_lowmem;
258 }
259
260 return blkcipher_next_fast(desc, walk);
261
262set_phys_lowmem:
263 if (walk->flags & BLKCIPHER_WALK_PHYS) {
264 walk->src.phys.page = virt_to_page(walk->src.virt.addr);
265 walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
266 walk->src.phys.offset &= PAGE_SIZE - 1;
267 walk->dst.phys.offset &= PAGE_SIZE - 1;
268 }
269 return err;
270}
271
272static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
273 struct crypto_blkcipher *tfm,
274 unsigned int alignmask)
275{
276 unsigned bs = crypto_blkcipher_blocksize(tfm);
277 unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
278 unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
279 u8 *iv;
280
281 size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
282 walk->buffer = kmalloc(size, GFP_ATOMIC);
283 if (!walk->buffer)
284 return -ENOMEM;
285
286 iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
287 iv = blkcipher_get_spot(iv, bs) + bs;
288 iv = blkcipher_get_spot(iv, bs) + bs;
289 iv = blkcipher_get_spot(iv, ivsize);
290
291 walk->iv = memcpy(iv, walk->iv, ivsize);
292 return 0;
293}
294
295int blkcipher_walk_virt(struct blkcipher_desc *desc,
296 struct blkcipher_walk *walk)
297{
298 walk->flags &= ~BLKCIPHER_WALK_PHYS;
299 return blkcipher_walk_first(desc, walk);
300}
301EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
302
303int blkcipher_walk_phys(struct blkcipher_desc *desc,
304 struct blkcipher_walk *walk)
305{
306 walk->flags |= BLKCIPHER_WALK_PHYS;
307 return blkcipher_walk_first(desc, walk);
308}
309EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
310
311static int blkcipher_walk_first(struct blkcipher_desc *desc,
312 struct blkcipher_walk *walk)
313{
314 struct crypto_blkcipher *tfm = desc->tfm;
315 unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
316
317 walk->nbytes = walk->total;
318 if (unlikely(!walk->total))
319 return 0;
320
321 walk->buffer = NULL;
322 walk->iv = desc->info;
323 if (unlikely(((unsigned long)walk->iv & alignmask))) {
324 int err = blkcipher_copy_iv(walk, tfm, alignmask);
325 if (err)
326 return err;
327 }
328
329 scatterwalk_start(&walk->in, walk->in.sg);
330 scatterwalk_start(&walk->out, walk->out.sg);
331 walk->page = NULL;
332
333 return blkcipher_walk_next(desc, walk);
334}
335
336static int setkey(struct crypto_tfm *tfm, const u8 *key,
337 unsigned int keylen)
338{
339 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
340
341 if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
342 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
343 return -EINVAL;
344 }
345
346 return cipher->setkey(tfm, key, keylen);
347}
348
349static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
350{
351 struct blkcipher_alg *cipher = &alg->cra_blkcipher;
352 unsigned int len = alg->cra_ctxsize;
353
354 if (cipher->ivsize) {
355 len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
356 len += cipher->ivsize;
357 }
358
359 return len;
360}
361
362static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
363{
364 struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
365 struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
366 unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
367 unsigned long addr;
368
369 if (alg->ivsize > PAGE_SIZE / 8)
370 return -EINVAL;
371
372 crt->setkey = setkey;
373 crt->encrypt = alg->encrypt;
374 crt->decrypt = alg->decrypt;
375
376 addr = (unsigned long)crypto_tfm_ctx(tfm);
377 addr = ALIGN(addr, align);
378 addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
379 crt->iv = (void *)addr;
380
381 return 0;
382}
383
384static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
385 __attribute_used__;
386static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
387{
388 seq_printf(m, "type : blkcipher\n");
389 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
390 seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
391 seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
392 seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
393}
394
395const struct crypto_type crypto_blkcipher_type = {
396 .ctxsize = crypto_blkcipher_ctxsize,
397 .init = crypto_init_blkcipher_ops,
398#ifdef CONFIG_PROC_FS
399 .show = crypto_blkcipher_show,
400#endif
401};
402EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
403
404MODULE_LICENSE("GPL");
405MODULE_DESCRIPTION("Generic block chaining cipher type");
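A hedged sketch of how a mode implementation drives the walk API above, modelled on the cbc code later in this patch; example_crypt and the per-block processing are placeholders.

        static int example_crypt(struct blkcipher_desc *desc,
                                 struct scatterlist *dst,
                                 struct scatterlist *src,
                                 unsigned int nbytes)
        {
                unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
                struct blkcipher_walk walk;
                int err;

                blkcipher_walk_init(&walk, dst, src, nbytes);
                err = blkcipher_walk_virt(desc, &walk);

                while (walk.nbytes) {
                        /* Process full blocks from walk.src.virt.addr to
                         * walk.dst.virt.addr; both are mapped and
                         * alignmask-aligned here.  The third argument is
                         * the sub-blocksize remainder left unprocessed. */
                        err = blkcipher_walk_done(desc, &walk,
                                                  walk.nbytes % bsize);
                }

                return err;
        }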
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index 490265f42b3b..55238c4e37f0 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -399,8 +399,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
399/* 399/*
400 * Calculates the blowfish S and P boxes for encryption and decryption. 400 * Calculates the blowfish S and P boxes for encryption and decryption.
401 */ 401 */
402static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, 402static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
403 unsigned int keylen, u32 *flags)
404{ 403{
405 struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 404 struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
406 u32 *P = ctx->p; 405 u32 *P = ctx->p;
diff --git a/crypto/cast5.c b/crypto/cast5.c
index 08eef58c1d3d..13ea60abc19a 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -769,8 +769,7 @@ static void key_schedule(u32 * x, u32 * z, u32 * k)
769} 769}
770 770
771 771
772static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, 772static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len)
773 unsigned key_len, u32 *flags)
774{ 773{
775 struct cast5_ctx *c = crypto_tfm_ctx(tfm); 774 struct cast5_ctx *c = crypto_tfm_ctx(tfm);
776 int i; 775 int i;
@@ -778,11 +777,6 @@ static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key,
778 u32 z[4]; 777 u32 z[4];
779 u32 k[16]; 778 u32 k[16];
780 __be32 p_key[4]; 779 __be32 p_key[4];
781
782 if (key_len < 5 || key_len > 16) {
783 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
784 return -EINVAL;
785 }
786 780
787 c->rr = key_len <= 10 ? 1 : 0; 781 c->rr = key_len <= 10 ? 1 : 0;
788 782
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 08e33bfc3ad1..136ab6dfe8c5 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -382,14 +382,15 @@ static inline void W(u32 *key, unsigned int i) {
382} 382}
383 383
384static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, 384static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
385 unsigned key_len, u32 *flags) 385 unsigned key_len)
386{ 386{
387 int i; 387 int i;
388 u32 key[8]; 388 u32 key[8];
389 __be32 p_key[8]; /* padded key */ 389 __be32 p_key[8]; /* padded key */
390 struct cast6_ctx *c = crypto_tfm_ctx(tfm); 390 struct cast6_ctx *c = crypto_tfm_ctx(tfm);
391 u32 *flags = &tfm->crt_flags;
391 392
392 if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { 393 if (key_len % 4 != 0) {
393 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 394 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
394 return -EINVAL; 395 return -EINVAL;
395 } 396 }
diff --git a/crypto/cbc.c b/crypto/cbc.c
new file mode 100644
index 000000000000..f5542b4db387
--- /dev/null
+++ b/crypto/cbc.c
@@ -0,0 +1,344 @@
1/*
2 * CBC: Cipher Block Chaining mode
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <crypto/algapi.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/scatterlist.h>
19#include <linux/slab.h>
20
21struct crypto_cbc_ctx {
22 struct crypto_cipher *child;
23 void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
24};
25
26static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
27 unsigned int keylen)
28{
29 struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
30 struct crypto_cipher *child = ctx->child;
31 int err;
32
33 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
34 crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
35 CRYPTO_TFM_REQ_MASK);
36 err = crypto_cipher_setkey(child, key, keylen);
37 crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
38 CRYPTO_TFM_RES_MASK);
39 return err;
40}
41
42static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
43 struct blkcipher_walk *walk,
44 struct crypto_cipher *tfm,
45 void (*xor)(u8 *, const u8 *,
46 unsigned int))
47{
48 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
49 crypto_cipher_alg(tfm)->cia_encrypt;
50 int bsize = crypto_cipher_blocksize(tfm);
51 unsigned int nbytes = walk->nbytes;
52 u8 *src = walk->src.virt.addr;
53 u8 *dst = walk->dst.virt.addr;
54 u8 *iv = walk->iv;
55
56 do {
57 xor(iv, src, bsize);
58 fn(crypto_cipher_tfm(tfm), dst, iv);
59 memcpy(iv, dst, bsize);
60
61 src += bsize;
62 dst += bsize;
63 } while ((nbytes -= bsize) >= bsize);
64
65 return nbytes;
66}
67
68static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
69 struct blkcipher_walk *walk,
70 struct crypto_cipher *tfm,
71 void (*xor)(u8 *, const u8 *,
72 unsigned int))
73{
74 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
75 crypto_cipher_alg(tfm)->cia_encrypt;
76 int bsize = crypto_cipher_blocksize(tfm);
77 unsigned int nbytes = walk->nbytes;
78 u8 *src = walk->src.virt.addr;
79 u8 *iv = walk->iv;
80
81 do {
82 xor(src, iv, bsize);
83 fn(crypto_cipher_tfm(tfm), src, src);
84 iv = src;
85
86 src += bsize;
87 } while ((nbytes -= bsize) >= bsize);
88
89 memcpy(walk->iv, iv, bsize);
90
91 return nbytes;
92}
93
94static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
95 struct scatterlist *dst, struct scatterlist *src,
96 unsigned int nbytes)
97{
98 struct blkcipher_walk walk;
99 struct crypto_blkcipher *tfm = desc->tfm;
100 struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
101 struct crypto_cipher *child = ctx->child;
102 void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
103 int err;
104
105 blkcipher_walk_init(&walk, dst, src, nbytes);
106 err = blkcipher_walk_virt(desc, &walk);
107
108 while ((nbytes = walk.nbytes)) {
109 if (walk.src.virt.addr == walk.dst.virt.addr)
110 nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child,
111 xor);
112 else
113 nbytes = crypto_cbc_encrypt_segment(desc, &walk, child,
114 xor);
115 err = blkcipher_walk_done(desc, &walk, nbytes);
116 }
117
118 return err;
119}
120
121static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
122 struct blkcipher_walk *walk,
123 struct crypto_cipher *tfm,
124 void (*xor)(u8 *, const u8 *,
125 unsigned int))
126{
127 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
128 crypto_cipher_alg(tfm)->cia_decrypt;
129 int bsize = crypto_cipher_blocksize(tfm);
130 unsigned int nbytes = walk->nbytes;
131 u8 *src = walk->src.virt.addr;
132 u8 *dst = walk->dst.virt.addr;
133 u8 *iv = walk->iv;
134
135 do {
136 fn(crypto_cipher_tfm(tfm), dst, src);
137 xor(dst, iv, bsize);
138 iv = src;
139
140 src += bsize;
141 dst += bsize;
142 } while ((nbytes -= bsize) >= bsize);
143
144 memcpy(walk->iv, iv, bsize);
145
146 return nbytes;
147}
148
149static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
150 struct blkcipher_walk *walk,
151 struct crypto_cipher *tfm,
152 void (*xor)(u8 *, const u8 *,
153 unsigned int))
154{
155 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
156 crypto_cipher_alg(tfm)->cia_decrypt;
157 int bsize = crypto_cipher_blocksize(tfm);
158 unsigned long alignmask = crypto_cipher_alignmask(tfm);
159 unsigned int nbytes = walk->nbytes;
160 u8 *src = walk->src.virt.addr;
161 u8 stack[bsize + alignmask];
162 u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
163
164 memcpy(first_iv, walk->iv, bsize);
165
166 /* Start of the last block. */
167 src += nbytes - nbytes % bsize - bsize;
168 memcpy(walk->iv, src, bsize);
169
170 for (;;) {
171 fn(crypto_cipher_tfm(tfm), src, src);
172 if ((nbytes -= bsize) < bsize)
173 break;
174 xor(src, src - bsize, bsize);
175 src -= bsize;
176 }
177
178 xor(src, first_iv, bsize);
179
180 return nbytes;
181}
182
183static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
184 struct scatterlist *dst, struct scatterlist *src,
185 unsigned int nbytes)
186{
187 struct blkcipher_walk walk;
188 struct crypto_blkcipher *tfm = desc->tfm;
189 struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
190 struct crypto_cipher *child = ctx->child;
191 void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
192 int err;
193
194 blkcipher_walk_init(&walk, dst, src, nbytes);
195 err = blkcipher_walk_virt(desc, &walk);
196
197 while ((nbytes = walk.nbytes)) {
198 if (walk.src.virt.addr == walk.dst.virt.addr)
199 nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child,
200 xor);
201 else
202 nbytes = crypto_cbc_decrypt_segment(desc, &walk, child,
203 xor);
204 err = blkcipher_walk_done(desc, &walk, nbytes);
205 }
206
207 return err;
208}
209
210static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
211{
212 do {
213 *a++ ^= *b++;
214 } while (--bs);
215}
216
217static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
218{
219 u32 *a = (u32 *)dst;
220 u32 *b = (u32 *)src;
221
222 do {
223 *a++ ^= *b++;
224 } while ((bs -= 4));
225}
226
227static void xor_64(u8 *a, const u8 *b, unsigned int bs)
228{
229 ((u32 *)a)[0] ^= ((u32 *)b)[0];
230 ((u32 *)a)[1] ^= ((u32 *)b)[1];
231}
232
233static void xor_128(u8 *a, const u8 *b, unsigned int bs)
234{
235 ((u32 *)a)[0] ^= ((u32 *)b)[0];
236 ((u32 *)a)[1] ^= ((u32 *)b)[1];
237 ((u32 *)a)[2] ^= ((u32 *)b)[2];
238 ((u32 *)a)[3] ^= ((u32 *)b)[3];
239}
240
241static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
242{
243 struct crypto_instance *inst = (void *)tfm->__crt_alg;
244 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
245 struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
246
247 switch (crypto_tfm_alg_blocksize(tfm)) {
248 case 8:
249 ctx->xor = xor_64;
250 break;
251
252 case 16:
253 ctx->xor = xor_128;
254 break;
255
256 default:
257 if (crypto_tfm_alg_blocksize(tfm) % 4)
258 ctx->xor = xor_byte;
259 else
260 ctx->xor = xor_quad;
261 }
262
263 tfm = crypto_spawn_tfm(spawn);
264 if (IS_ERR(tfm))
265 return PTR_ERR(tfm);
266
267 ctx->child = crypto_cipher_cast(tfm);
268 return 0;
269}
270
271static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
272{
273 struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
274 crypto_free_cipher(ctx->child);
275}
276
277static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len)
278{
279 struct crypto_instance *inst;
280 struct crypto_alg *alg;
281
282 alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
283 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
284 if (IS_ERR(alg))
285 return ERR_PTR(PTR_ERR(alg));
286
287 inst = crypto_alloc_instance("cbc", alg);
288 if (IS_ERR(inst))
289 goto out_put_alg;
290
291 inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
292 inst->alg.cra_priority = alg->cra_priority;
293 inst->alg.cra_blocksize = alg->cra_blocksize;
294 inst->alg.cra_alignmask = alg->cra_alignmask;
295 inst->alg.cra_type = &crypto_blkcipher_type;
296
297 if (!(alg->cra_blocksize % 4))
298 inst->alg.cra_alignmask |= 3;
299 inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
300 inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
301 inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
302
303 inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
304
305 inst->alg.cra_init = crypto_cbc_init_tfm;
306 inst->alg.cra_exit = crypto_cbc_exit_tfm;
307
308 inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
309 inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
310 inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
311
312out_put_alg:
313 crypto_mod_put(alg);
314 return inst;
315}
316
317static void crypto_cbc_free(struct crypto_instance *inst)
318{
319 crypto_drop_spawn(crypto_instance_ctx(inst));
320 kfree(inst);
321}
322
323static struct crypto_template crypto_cbc_tmpl = {
324 .name = "cbc",
325 .alloc = crypto_cbc_alloc,
326 .free = crypto_cbc_free,
327 .module = THIS_MODULE,
328};
329
330static int __init crypto_cbc_module_init(void)
331{
332 return crypto_register_template(&crypto_cbc_tmpl);
333}
334
335static void __exit crypto_cbc_module_exit(void)
336{
337 crypto_unregister_template(&crypto_cbc_tmpl);
338}
339
340module_init(crypto_cbc_module_init);
341module_exit(crypto_cbc_module_exit);
342
343MODULE_LICENSE("GPL");
344MODULE_DESCRIPTION("CBC block cipher algorithm");
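With the template registered, "cbc(<cipher>)" names become allocatable through the new blkcipher user API. A hedged usage sketch, assuming AES is available, a block-aligned length, and caller-provided key/iv/buf:

        static int example_cbc_encrypt(u8 *key, u8 *iv, u8 *buf,
                                       unsigned int len)
        {
                struct crypto_blkcipher *tfm;
                struct blkcipher_desc desc;
                struct scatterlist sg[1];
                int err;

                tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                err = crypto_blkcipher_setkey(tfm, key, 16);    /* AES-128 */
                if (err)
                        goto out;

                crypto_blkcipher_set_iv(tfm, iv,
                                        crypto_blkcipher_ivsize(tfm));

                desc.tfm = tfm;
                desc.flags = 0;
                sg_init_one(sg, buf, len); /* len: multiple of block size */
                err = crypto_blkcipher_encrypt(&desc, sg, sg, len);

        out:
                crypto_free_blkcipher(tfm);
                return err;
        }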
diff --git a/crypto/cipher.c b/crypto/cipher.c
index b899eb97abd7..9e03701cfdcc 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -23,6 +23,28 @@
23#include "internal.h" 23#include "internal.h"
24#include "scatterwalk.h" 24#include "scatterwalk.h"
25 25
26struct cipher_alg_compat {
27 unsigned int cia_min_keysize;
28 unsigned int cia_max_keysize;
29 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
30 unsigned int keylen);
31 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
32 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
33
34 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
35 u8 *dst, const u8 *src,
36 unsigned int nbytes);
37 unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
38 u8 *dst, const u8 *src,
39 unsigned int nbytes);
40 unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
41 u8 *dst, const u8 *src,
42 unsigned int nbytes);
43 unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
44 u8 *dst, const u8 *src,
45 unsigned int nbytes);
46};
47
26static inline void xor_64(u8 *a, const u8 *b) 48static inline void xor_64(u8 *a, const u8 *b)
27{ 49{
28 ((u32 *)a)[0] ^= ((u32 *)b)[0]; 50 ((u32 *)a)[0] ^= ((u32 *)b)[0];
@@ -45,15 +67,10 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
45 u8 buffer[bsize * 2 + alignmask]; 67 u8 buffer[bsize * 2 + alignmask];
46 u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 68 u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
47 u8 *dst = src + bsize; 69 u8 *dst = src + bsize;
48 unsigned int n;
49
50 n = scatterwalk_copychunks(src, in, bsize, 0);
51 scatterwalk_advance(in, n);
52 70
71 scatterwalk_copychunks(src, in, bsize, 0);
53 desc->prfn(desc, dst, src, bsize); 72 desc->prfn(desc, dst, src, bsize);
54 73 scatterwalk_copychunks(dst, out, bsize, 1);
55 n = scatterwalk_copychunks(dst, out, bsize, 1);
56 scatterwalk_advance(out, n);
57 74
58 return bsize; 75 return bsize;
59} 76}
@@ -64,12 +81,16 @@ static inline unsigned int crypt_fast(const struct cipher_desc *desc,
64 unsigned int nbytes, u8 *tmp) 81 unsigned int nbytes, u8 *tmp)
65{ 82{
66 u8 *src, *dst; 83 u8 *src, *dst;
84 u8 *real_src, *real_dst;
85
86 real_src = scatterwalk_map(in, 0);
87 real_dst = scatterwalk_map(out, 1);
67 88
68 src = in->data; 89 src = real_src;
69 dst = scatterwalk_samebuf(in, out) ? src : out->data; 90 dst = scatterwalk_samebuf(in, out) ? src : real_dst;
70 91
71 if (tmp) { 92 if (tmp) {
72 memcpy(tmp, in->data, nbytes); 93 memcpy(tmp, src, nbytes);
73 src = tmp; 94 src = tmp;
74 dst = tmp; 95 dst = tmp;
75 } 96 }
@@ -77,7 +98,10 @@ static inline unsigned int crypt_fast(const struct cipher_desc *desc,
77 nbytes = desc->prfn(desc, dst, src, nbytes); 98 nbytes = desc->prfn(desc, dst, src, nbytes);
78 99
79 if (tmp) 100 if (tmp)
80 memcpy(out->data, tmp, nbytes); 101 memcpy(real_dst, tmp, nbytes);
102
103 scatterwalk_unmap(real_src, 0);
104 scatterwalk_unmap(real_dst, 1);
81 105
82 scatterwalk_advance(in, nbytes); 106 scatterwalk_advance(in, nbytes);
83 scatterwalk_advance(out, nbytes); 107 scatterwalk_advance(out, nbytes);
@@ -126,9 +150,6 @@ static int crypt(const struct cipher_desc *desc,
126 tmp = (u8 *)buffer; 150 tmp = (u8 *)buffer;
127 } 151 }
128 152
129 scatterwalk_map(&walk_in, 0);
130 scatterwalk_map(&walk_out, 1);
131
132 n = scatterwalk_clamp(&walk_in, n); 153 n = scatterwalk_clamp(&walk_in, n);
133 n = scatterwalk_clamp(&walk_out, n); 154 n = scatterwalk_clamp(&walk_out, n);
134 155
@@ -145,7 +166,7 @@ static int crypt(const struct cipher_desc *desc,
145 if (!nbytes) 166 if (!nbytes)
146 break; 167 break;
147 168
148 crypto_yield(tfm); 169 crypto_yield(tfm->crt_flags);
149 } 170 }
150 171
151 if (buffer) 172 if (buffer)
@@ -264,12 +285,12 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
264{ 285{
265 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; 286 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
266 287
288 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
267 if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { 289 if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
268 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 290 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
269 return -EINVAL; 291 return -EINVAL;
270 } else 292 } else
271 return cia->cia_setkey(tfm, key, keylen, 293 return cia->cia_setkey(tfm, key, keylen);
272 &tfm->crt_flags);
273} 294}
274 295
275static int ecb_encrypt(struct crypto_tfm *tfm, 296static int ecb_encrypt(struct crypto_tfm *tfm,
@@ -277,7 +298,7 @@ static int ecb_encrypt(struct crypto_tfm *tfm,
277 struct scatterlist *src, unsigned int nbytes) 298 struct scatterlist *src, unsigned int nbytes)
278{ 299{
279 struct cipher_desc desc; 300 struct cipher_desc desc;
280 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 301 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
281 302
282 desc.tfm = tfm; 303 desc.tfm = tfm;
283 desc.crfn = cipher->cia_encrypt; 304 desc.crfn = cipher->cia_encrypt;
@@ -292,7 +313,7 @@ static int ecb_decrypt(struct crypto_tfm *tfm,
292 unsigned int nbytes) 313 unsigned int nbytes)
293{ 314{
294 struct cipher_desc desc; 315 struct cipher_desc desc;
295 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 316 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
296 317
297 desc.tfm = tfm; 318 desc.tfm = tfm;
298 desc.crfn = cipher->cia_decrypt; 319 desc.crfn = cipher->cia_decrypt;
@@ -307,7 +328,7 @@ static int cbc_encrypt(struct crypto_tfm *tfm,
307 unsigned int nbytes) 328 unsigned int nbytes)
308{ 329{
309 struct cipher_desc desc; 330 struct cipher_desc desc;
310 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 331 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
311 332
312 desc.tfm = tfm; 333 desc.tfm = tfm;
313 desc.crfn = cipher->cia_encrypt; 334 desc.crfn = cipher->cia_encrypt;
@@ -323,7 +344,7 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm,
323 unsigned int nbytes, u8 *iv) 344 unsigned int nbytes, u8 *iv)
324{ 345{
325 struct cipher_desc desc; 346 struct cipher_desc desc;
326 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 347 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
327 348
328 desc.tfm = tfm; 349 desc.tfm = tfm;
329 desc.crfn = cipher->cia_encrypt; 350 desc.crfn = cipher->cia_encrypt;
@@ -339,7 +360,7 @@ static int cbc_decrypt(struct crypto_tfm *tfm,
339 unsigned int nbytes) 360 unsigned int nbytes)
340{ 361{
341 struct cipher_desc desc; 362 struct cipher_desc desc;
342 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 363 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
343 364
344 desc.tfm = tfm; 365 desc.tfm = tfm;
345 desc.crfn = cipher->cia_decrypt; 366 desc.crfn = cipher->cia_decrypt;
@@ -355,7 +376,7 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm,
355 unsigned int nbytes, u8 *iv) 376 unsigned int nbytes, u8 *iv)
356{ 377{
357 struct cipher_desc desc; 378 struct cipher_desc desc;
358 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 379 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
359 380
360 desc.tfm = tfm; 381 desc.tfm = tfm;
361 desc.crfn = cipher->cia_decrypt; 382 desc.crfn = cipher->cia_decrypt;
@@ -388,17 +409,67 @@ int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
388 return 0; 409 return 0;
389} 410}
390 411
412static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
413 const u8 *),
414 struct crypto_tfm *tfm,
415 u8 *dst, const u8 *src)
416{
417 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
418 unsigned int size = crypto_tfm_alg_blocksize(tfm);
419 u8 buffer[size + alignmask];
420 u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
421
422 memcpy(tmp, src, size);
423 fn(tfm, tmp, tmp);
424 memcpy(dst, tmp, size);
425}
426
427static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
428 u8 *dst, const u8 *src)
429{
430 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
431 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
432
433 if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
434 cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
435 return;
436 }
437
438 cipher->cia_encrypt(tfm, dst, src);
439}
440
441static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
442 u8 *dst, const u8 *src)
443{
444 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
445 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
446
447 if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
448 cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
449 return;
450 }
451
452 cipher->cia_decrypt(tfm, dst, src);
453}
454
391int crypto_init_cipher_ops(struct crypto_tfm *tfm) 455int crypto_init_cipher_ops(struct crypto_tfm *tfm)
392{ 456{
393 int ret = 0; 457 int ret = 0;
394 struct cipher_tfm *ops = &tfm->crt_cipher; 458 struct cipher_tfm *ops = &tfm->crt_cipher;
459 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
395 460
396 ops->cit_setkey = setkey; 461 ops->cit_setkey = setkey;
462 ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
463 cipher_encrypt_unaligned : cipher->cia_encrypt;
464 ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
465 cipher_decrypt_unaligned : cipher->cia_decrypt;
397 466
398 switch (tfm->crt_cipher.cit_mode) { 467 switch (tfm->crt_cipher.cit_mode) {
399 case CRYPTO_TFM_MODE_ECB: 468 case CRYPTO_TFM_MODE_ECB:
400 ops->cit_encrypt = ecb_encrypt; 469 ops->cit_encrypt = ecb_encrypt;
401 ops->cit_decrypt = ecb_decrypt; 470 ops->cit_decrypt = ecb_decrypt;
471 ops->cit_encrypt_iv = nocrypt_iv;
472 ops->cit_decrypt_iv = nocrypt_iv;
402 break; 473 break;
403 474
404 case CRYPTO_TFM_MODE_CBC: 475 case CRYPTO_TFM_MODE_CBC:
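The new cit_encrypt_one/cit_decrypt_one hooks give callers a single-block primitive that transparently falls back to the bounce-buffer path above when src/dst violate the alignmask. A hedged sketch through crypto_cipher_encrypt_one()/crypto_cipher_decrypt_one(), the inline wrappers assumed to accompany these hooks:

        static void example_block_roundtrip(struct crypto_cipher *tfm,
                                            u8 *buf /* one block */)
        {
                /* Works for any alignment of buf; misaligned buffers go
                 * through the temporary-copy path above. */
                crypto_cipher_encrypt_one(tfm, buf, buf);
                crypto_cipher_decrypt_one(tfm, buf, buf);
        }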
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index f2660123aeb4..0fa744392a4c 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -16,14 +16,14 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/crypto.h> 17#include <linux/crypto.h>
18#include <linux/crc32c.h> 18#include <linux/crc32c.h>
19#include <linux/types.h> 19#include <linux/kernel.h>
20#include <asm/byteorder.h>
21 20
22#define CHKSUM_BLOCK_SIZE 32 21#define CHKSUM_BLOCK_SIZE 32
23#define CHKSUM_DIGEST_SIZE 4 22#define CHKSUM_DIGEST_SIZE 4
24 23
25struct chksum_ctx { 24struct chksum_ctx {
26 u32 crc; 25 u32 crc;
26 u32 key;
27}; 27};
28 28
29/* 29/*
@@ -35,7 +35,7 @@ static void chksum_init(struct crypto_tfm *tfm)
35{ 35{
36 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 36 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
37 37
38 mctx->crc = ~(u32)0; /* common usage */ 38 mctx->crc = mctx->key;
39} 39}
40 40
41/* 41/*
@@ -44,16 +44,15 @@ static void chksum_init(struct crypto_tfm *tfm)
44 * the seed. 44 * the seed.
45 */ 45 */
46static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key, 46static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key,
47 unsigned int keylen, u32 *flags) 47 unsigned int keylen)
48{ 48{
49 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 49 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
50 50
51 if (keylen != sizeof(mctx->crc)) { 51 if (keylen != sizeof(mctx->crc)) {
52 if (flags) 52 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
53 *flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
54 return -EINVAL; 53 return -EINVAL;
55 } 54 }
56 mctx->crc = __cpu_to_le32(*(u32 *)key); 55 mctx->key = le32_to_cpu(*(__le32 *)key);
57 return 0; 56 return 0;
58} 57}
59 58
@@ -61,19 +60,23 @@ static void chksum_update(struct crypto_tfm *tfm, const u8 *data,
61 unsigned int length) 60 unsigned int length)
62{ 61{
63 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 62 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
64 u32 mcrc;
65 63
66 mcrc = crc32c(mctx->crc, data, (size_t)length); 64 mctx->crc = crc32c(mctx->crc, data, length);
67
68 mctx->crc = mcrc;
69} 65}
70 66
71static void chksum_final(struct crypto_tfm *tfm, u8 *out) 67static void chksum_final(struct crypto_tfm *tfm, u8 *out)
72{ 68{
73 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 69 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
74 u32 mcrc = (mctx->crc ^ ~(u32)0);
75 70
76 *(u32 *)out = __le32_to_cpu(mcrc); 71 *(__le32 *)out = ~cpu_to_le32(mctx->crc);
72}
73
74static int crc32c_cra_init(struct crypto_tfm *tfm)
75{
76 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
77
78 mctx->key = ~0;
79 return 0;
77} 80}
78 81
79static struct crypto_alg alg = { 82static struct crypto_alg alg = {
@@ -83,6 +86,7 @@ static struct crypto_alg alg = {
83 .cra_ctxsize = sizeof(struct chksum_ctx), 86 .cra_ctxsize = sizeof(struct chksum_ctx),
84 .cra_module = THIS_MODULE, 87 .cra_module = THIS_MODULE,
85 .cra_list = LIST_HEAD_INIT(alg.cra_list), 88 .cra_list = LIST_HEAD_INIT(alg.cra_list),
89 .cra_init = crc32c_cra_init,
86 .cra_u = { 90 .cra_u = {
87 .digest = { 91 .digest = {
88 .dia_digestsize= CHKSUM_DIGEST_SIZE, 92 .dia_digestsize= CHKSUM_DIGEST_SIZE,
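The crc32c rework splits the running crc from the seed: chksum_setkey() stores the little-endian seed in mctx->key, crc32c_cra_init() defaults it to ~0, and every chksum_init() restarts from it. A hedged sketch through the new hash user API assumed by this series (crypto_alloc_hash and friends):

        static int example_crc32c(struct scatterlist *sg,
                                  unsigned int nbytes,
                                  u8 *out /* 4 bytes */)
        {
                struct crypto_hash *tfm;
                struct hash_desc desc;
                __le32 seed = cpu_to_le32(~(u32)0);     /* default seed */
                int err;

                tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                desc.tfm = tfm;
                desc.flags = 0;

                err = crypto_hash_setkey(tfm, (u8 *)&seed, 4); /* re-seed */
                if (!err)
                        err = crypto_hash_digest(&desc, sg, nbytes, out);

                crypto_free_hash(tfm);
                return err;
        }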
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index a0d956b52949..24dbb5d8617e 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -48,7 +48,7 @@ static void null_final(struct crypto_tfm *tfm, u8 *out)
48{ } 48{ }
49 49
50static int null_setkey(struct crypto_tfm *tfm, const u8 *key, 50static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
51 unsigned int keylen, u32 *flags) 51 unsigned int keylen)
52{ return 0; } 52{ return 0; }
53 53
54static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 54static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
new file mode 100644
index 000000000000..9b5b15601068
--- /dev/null
+++ b/crypto/cryptomgr.c
@@ -0,0 +1,156 @@
1/*
2 * Create default crypto algorithm instances.
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/crypto.h>
14#include <linux/ctype.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/notifier.h>
19#include <linux/rtnetlink.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/workqueue.h>
23
24#include "internal.h"
25
26struct cryptomgr_param {
27 struct work_struct work;
28
29 struct {
30 struct rtattr attr;
31 struct crypto_attr_alg data;
32 } alg;
33
34 struct {
35 u32 type;
36 u32 mask;
37 char name[CRYPTO_MAX_ALG_NAME];
38 } larval;
39
40 char template[CRYPTO_MAX_ALG_NAME];
41};
42
43static void cryptomgr_probe(void *data)
44{
45 struct cryptomgr_param *param = data;
46 struct crypto_template *tmpl;
47 struct crypto_instance *inst;
48 int err;
49
50 tmpl = crypto_lookup_template(param->template);
51 if (!tmpl)
52 goto err;
53
54 do {
55 inst = tmpl->alloc(&param->alg, sizeof(param->alg));
56 if (IS_ERR(inst))
57 err = PTR_ERR(inst);
58 else if ((err = crypto_register_instance(tmpl, inst)))
59 tmpl->free(inst);
60 } while (err == -EAGAIN && !signal_pending(current));
61
62 crypto_tmpl_put(tmpl);
63
64 if (err)
65 goto err;
66
67out:
68 kfree(param);
69 return;
70
71err:
72 crypto_larval_error(param->larval.name, param->larval.type,
73 param->larval.mask);
74 goto out;
75}
76
77static int cryptomgr_schedule_probe(struct crypto_larval *larval)
78{
79 struct cryptomgr_param *param;
80 const char *name = larval->alg.cra_name;
81 const char *p;
82 unsigned int len;
83
84 param = kmalloc(sizeof(*param), GFP_KERNEL);
85 if (!param)
86 goto err;
87
88 for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
89 ;
90
91 len = p - name;
92 if (!len || *p != '(')
93 goto err_free_param;
94
95 memcpy(param->template, name, len);
96 param->template[len] = 0;
97
98 name = p + 1;
99 for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
100 ;
101
102 len = p - name;
103 if (!len || *p != ')' || p[1])
104 goto err_free_param;
105
106 param->alg.attr.rta_len = sizeof(param->alg);
107 param->alg.attr.rta_type = CRYPTOA_ALG;
108 memcpy(param->alg.data.name, name, len);
109 param->alg.data.name[len] = 0;
110
111 memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
112 param->larval.type = larval->alg.cra_flags;
113 param->larval.mask = larval->mask;
114
115 INIT_WORK(&param->work, cryptomgr_probe, param);
116 schedule_work(&param->work);
117
118 return NOTIFY_STOP;
119
120err_free_param:
121 kfree(param);
122err:
123 return NOTIFY_OK;
124}
125
126static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
127 void *data)
128{
129 switch (msg) {
130 case CRYPTO_MSG_ALG_REQUEST:
131 return cryptomgr_schedule_probe(data);
132 }
133
134 return NOTIFY_DONE;
135}
136
137static struct notifier_block cryptomgr_notifier = {
138 .notifier_call = cryptomgr_notify,
139};
140
141static int __init cryptomgr_init(void)
142{
143 return crypto_register_notifier(&cryptomgr_notifier);
144}
145
146static void __exit cryptomgr_exit(void)
147{
148 int err = crypto_unregister_notifier(&cryptomgr_notifier);
149 BUG_ON(err);
150}
151
152module_init(cryptomgr_init);
153module_exit(cryptomgr_exit);
154
155MODULE_LICENSE("GPL");
156MODULE_DESCRIPTION("Crypto Algorithm Manager");
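cryptomgr_schedule_probe() accepts exactly one level of nesting, "template(algorithm)", with both names restricted to alphanumerics, '-' and '_'. A worked example of the decomposition it performs for the request generated by a "cbc(aes)" lookup:

        /* name = "cbc(aes)"
         *   param->template      = "cbc"  (characters before '(')
         *   param->alg.data.name = "aes"  (between '(' and ')')
         * The ')' must end the string; anything else returns NOTIFY_OK,
         * i.e. the request is declined and the larval lookup fails. */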
diff --git a/crypto/des.c b/crypto/des.c
index a9d3c235a6af..1df3a714fa47 100644
--- a/crypto/des.c
+++ b/crypto/des.c
@@ -784,9 +784,10 @@ static void dkey(u32 *pe, const u8 *k)
784} 784}
785 785
786static int des_setkey(struct crypto_tfm *tfm, const u8 *key, 786static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
787 unsigned int keylen, u32 *flags) 787 unsigned int keylen)
788{ 788{
789 struct des_ctx *dctx = crypto_tfm_ctx(tfm); 789 struct des_ctx *dctx = crypto_tfm_ctx(tfm);
790 u32 *flags = &tfm->crt_flags;
790 u32 tmp[DES_EXPKEY_WORDS]; 791 u32 tmp[DES_EXPKEY_WORDS];
791 int ret; 792 int ret;
792 793
@@ -864,11 +865,12 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
864 * 865 *
865 */ 866 */
866static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, 867static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
867 unsigned int keylen, u32 *flags) 868 unsigned int keylen)
868{ 869{
869 const u32 *K = (const u32 *)key; 870 const u32 *K = (const u32 *)key;
870 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); 871 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
871 u32 *expkey = dctx->expkey; 872 u32 *expkey = dctx->expkey;
873 u32 *flags = &tfm->crt_flags;
872 874
873 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 875 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
874 !((K[2] ^ K[4]) | (K[3] ^ K[5])))) 876 !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
diff --git a/crypto/digest.c b/crypto/digest.c
index 603006a7bef2..0155a94e4b15 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -11,29 +11,89 @@
11 * any later version. 11 * any later version.
12 * 12 *
13 */ 13 */
14#include <linux/crypto.h> 14
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/highmem.h> 17#include <linux/highmem.h>
18#include <asm/scatterlist.h> 18#include <linux/module.h>
19#include <linux/scatterlist.h>
20
19#include "internal.h" 21#include "internal.h"
22#include "scatterwalk.h"
20 23
21static void init(struct crypto_tfm *tfm) 24void crypto_digest_init(struct crypto_tfm *tfm)
22{ 25{
23 tfm->__crt_alg->cra_digest.dia_init(tfm); 26 struct crypto_hash *hash = crypto_hash_cast(tfm);
27 struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
28
29 crypto_hash_init(&desc);
30}
31EXPORT_SYMBOL_GPL(crypto_digest_init);
32
33void crypto_digest_update(struct crypto_tfm *tfm,
34 struct scatterlist *sg, unsigned int nsg)
35{
36 struct crypto_hash *hash = crypto_hash_cast(tfm);
37 struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
38 unsigned int nbytes = 0;
39 unsigned int i;
40
41 for (i = 0; i < nsg; i++)
42 nbytes += sg[i].length;
43
44 crypto_hash_update(&desc, sg, nbytes);
45}
46EXPORT_SYMBOL_GPL(crypto_digest_update);
47
48void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
49{
50 struct crypto_hash *hash = crypto_hash_cast(tfm);
51 struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
52
53 crypto_hash_final(&desc, out);
24} 54}
55EXPORT_SYMBOL_GPL(crypto_digest_final);
25 56
26static void update(struct crypto_tfm *tfm, 57void crypto_digest_digest(struct crypto_tfm *tfm,
27 struct scatterlist *sg, unsigned int nsg) 58 struct scatterlist *sg, unsigned int nsg, u8 *out)
28{ 59{
60 struct crypto_hash *hash = crypto_hash_cast(tfm);
61 struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
62 unsigned int nbytes = 0;
29 unsigned int i; 63 unsigned int i;
64
65 for (i = 0; i < nsg; i++)
66 nbytes += sg[i].length;
67
68 crypto_hash_digest(&desc, sg, nbytes, out);
69}
70EXPORT_SYMBOL_GPL(crypto_digest_digest);
71
72static int init(struct hash_desc *desc)
73{
74 struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
75
76 tfm->__crt_alg->cra_digest.dia_init(tfm);
77 return 0;
78}
79
80static int update(struct hash_desc *desc,
81 struct scatterlist *sg, unsigned int nbytes)
82{
83 struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
30 unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); 84 unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
31 85
32 for (i = 0; i < nsg; i++) { 86 if (!nbytes)
87 return 0;
88
89 for (;;) {
90 struct page *pg = sg->page;
91 unsigned int offset = sg->offset;
92 unsigned int l = sg->length;
33 93
34 struct page *pg = sg[i].page; 94 if (unlikely(l > nbytes))
35 unsigned int offset = sg[i].offset; 95 l = nbytes;
36 unsigned int l = sg[i].length; 96 nbytes -= l;
37 97
38 do { 98 do {
39 unsigned int bytes_from_page = min(l, ((unsigned int) 99 unsigned int bytes_from_page = min(l, ((unsigned int)
@@ -55,41 +115,60 @@ static void update(struct crypto_tfm *tfm,
55 tfm->__crt_alg->cra_digest.dia_update(tfm, p, 115 tfm->__crt_alg->cra_digest.dia_update(tfm, p,
56 bytes_from_page); 116 bytes_from_page);
57 crypto_kunmap(src, 0); 117 crypto_kunmap(src, 0);
58 crypto_yield(tfm); 118 crypto_yield(desc->flags);
59 offset = 0; 119 offset = 0;
60 pg++; 120 pg++;
61 l -= bytes_from_page; 121 l -= bytes_from_page;
62 } while (l > 0); 122 } while (l > 0);
123
124 if (!nbytes)
125 break;
126 sg = sg_next(sg);
63 } 127 }
128
129 return 0;
64} 130}
65 131
66static void final(struct crypto_tfm *tfm, u8 *out) 132static int final(struct hash_desc *desc, u8 *out)
67{ 133{
134 struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
68 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); 135 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
136 struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
137
69 if (unlikely((unsigned long)out & alignmask)) { 138 if (unlikely((unsigned long)out & alignmask)) {
70 unsigned int size = crypto_tfm_alg_digestsize(tfm); 139 unsigned long align = alignmask + 1;
71 u8 buffer[size + alignmask]; 140 unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
72 u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 141 u8 *dst = (u8 *)ALIGN(addr, align) +
73 tfm->__crt_alg->cra_digest.dia_final(tfm, dst); 142 ALIGN(tfm->__crt_alg->cra_ctxsize, align);
74 memcpy(out, dst, size); 143
144 digest->dia_final(tfm, dst);
145 memcpy(out, dst, digest->dia_digestsize);
75 } else 146 } else
76 tfm->__crt_alg->cra_digest.dia_final(tfm, out); 147 digest->dia_final(tfm, out);
148
149 return 0;
150}
151
152static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
153{
154 crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
155 return -ENOSYS;
77} 156}
78 157
79static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 158static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
80{ 159{
81 u32 flags; 160 struct crypto_tfm *tfm = crypto_hash_tfm(hash);
82 if (tfm->__crt_alg->cra_digest.dia_setkey == NULL) 161
83 return -ENOSYS; 162 crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
84 return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags); 163 return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
85} 164}
86 165
87static void digest(struct crypto_tfm *tfm, 166static int digest(struct hash_desc *desc,
88 struct scatterlist *sg, unsigned int nsg, u8 *out) 167 struct scatterlist *sg, unsigned int nbytes, u8 *out)
89{ 168{
90 init(tfm); 169 init(desc);
91 update(tfm, sg, nsg); 170 update(desc, sg, nbytes);
92 final(tfm, out); 171 return final(desc, out);
93} 172}
94 173
95int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) 174int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
@@ -99,18 +178,22 @@ int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
99 178
100int crypto_init_digest_ops(struct crypto_tfm *tfm) 179int crypto_init_digest_ops(struct crypto_tfm *tfm)
101{ 180{
102 struct digest_tfm *ops = &tfm->crt_digest; 181 struct hash_tfm *ops = &tfm->crt_hash;
182 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
183
184 if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
185 return -EINVAL;
103 186
104 ops->dit_init = init; 187 ops->init = init;
105 ops->dit_update = update; 188 ops->update = update;
106 ops->dit_final = final; 189 ops->final = final;
107 ops->dit_digest = digest; 190 ops->digest = digest;
108 ops->dit_setkey = setkey; 191 ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
192 ops->digestsize = dalg->dia_digestsize;
109 193
110 return crypto_alloc_hmac_block(tfm); 194 return 0;
111} 195}
112 196
113void crypto_exit_digest_ops(struct crypto_tfm *tfm) 197void crypto_exit_digest_ops(struct crypto_tfm *tfm)
114{ 198{
115 crypto_free_hmac_block(tfm);
116} 199}
diff --git a/crypto/ecb.c b/crypto/ecb.c
new file mode 100644
index 000000000000..f239aa9c4017
--- /dev/null
+++ b/crypto/ecb.c
@@ -0,0 +1,181 @@
1/*
2 * ECB: Electronic CodeBook mode
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <crypto/algapi.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/scatterlist.h>
19#include <linux/slab.h>
20
21struct crypto_ecb_ctx {
22 struct crypto_cipher *child;
23};
24
25static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key,
26 unsigned int keylen)
27{
28 struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent);
29 struct crypto_cipher *child = ctx->child;
30 int err;
31
32 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
33 crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
34 CRYPTO_TFM_REQ_MASK);
35 err = crypto_cipher_setkey(child, key, keylen);
36 crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
37 CRYPTO_TFM_RES_MASK);
38 return err;
39}
40
41static int crypto_ecb_crypt(struct blkcipher_desc *desc,
42 struct blkcipher_walk *walk,
43 struct crypto_cipher *tfm,
44 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
45{
46 int bsize = crypto_cipher_blocksize(tfm);
47 unsigned int nbytes;
48 int err;
49
50 err = blkcipher_walk_virt(desc, walk);
51
52 while ((nbytes = walk->nbytes)) {
53 u8 *wsrc = walk->src.virt.addr;
54 u8 *wdst = walk->dst.virt.addr;
55
56 do {
57 fn(crypto_cipher_tfm(tfm), wdst, wsrc);
58
59 wsrc += bsize;
60 wdst += bsize;
61 } while ((nbytes -= bsize) >= bsize);
62
63 err = blkcipher_walk_done(desc, walk, nbytes);
64 }
65
66 return err;
67}
68
69static int crypto_ecb_encrypt(struct blkcipher_desc *desc,
70 struct scatterlist *dst, struct scatterlist *src,
71 unsigned int nbytes)
72{
73 struct blkcipher_walk walk;
74 struct crypto_blkcipher *tfm = desc->tfm;
75 struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
76 struct crypto_cipher *child = ctx->child;
77
78 blkcipher_walk_init(&walk, dst, src, nbytes);
79 return crypto_ecb_crypt(desc, &walk, child,
80 crypto_cipher_alg(child)->cia_encrypt);
81}
82
83static int crypto_ecb_decrypt(struct blkcipher_desc *desc,
84 struct scatterlist *dst, struct scatterlist *src,
85 unsigned int nbytes)
86{
87 struct blkcipher_walk walk;
88 struct crypto_blkcipher *tfm = desc->tfm;
89 struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
90 struct crypto_cipher *child = ctx->child;
91
92 blkcipher_walk_init(&walk, dst, src, nbytes);
93 return crypto_ecb_crypt(desc, &walk, child,
94 crypto_cipher_alg(child)->cia_decrypt);
95}
96
97static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
98{
99 struct crypto_instance *inst = (void *)tfm->__crt_alg;
100 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
101 struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
102
103 tfm = crypto_spawn_tfm(spawn);
104 if (IS_ERR(tfm))
105 return PTR_ERR(tfm);
106
107 ctx->child = crypto_cipher_cast(tfm);
108 return 0;
109}
110
111static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm)
112{
113 struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
114 crypto_free_cipher(ctx->child);
115}
116
117static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len)
118{
119 struct crypto_instance *inst;
120 struct crypto_alg *alg;
121
122 alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
123 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
124 if (IS_ERR(alg))
125 return ERR_PTR(PTR_ERR(alg));
126
127 inst = crypto_alloc_instance("ecb", alg);
128 if (IS_ERR(inst))
129 goto out_put_alg;
130
131 inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
132 inst->alg.cra_priority = alg->cra_priority;
133 inst->alg.cra_blocksize = alg->cra_blocksize;
134 inst->alg.cra_alignmask = alg->cra_alignmask;
135 inst->alg.cra_type = &crypto_blkcipher_type;
136
137 inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
138 inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
139
140 inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx);
141
142 inst->alg.cra_init = crypto_ecb_init_tfm;
143 inst->alg.cra_exit = crypto_ecb_exit_tfm;
144
145 inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey;
146 inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt;
147 inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt;
148
149out_put_alg:
150 crypto_mod_put(alg);
151 return inst;
152}
153
154static void crypto_ecb_free(struct crypto_instance *inst)
155{
156 crypto_drop_spawn(crypto_instance_ctx(inst));
157 kfree(inst);
158}
159
160static struct crypto_template crypto_ecb_tmpl = {
161 .name = "ecb",
162 .alloc = crypto_ecb_alloc,
163 .free = crypto_ecb_free,
164 .module = THIS_MODULE,
165};
166
167static int __init crypto_ecb_module_init(void)
168{
169 return crypto_register_template(&crypto_ecb_tmpl);
170}
171
172static void __exit crypto_ecb_module_exit(void)
173{
174 crypto_unregister_template(&crypto_ecb_tmpl);
175}
176
177module_init(crypto_ecb_module_init);
178module_exit(crypto_ecb_module_exit);
179
180MODULE_LICENSE("GPL");
181MODULE_DESCRIPTION("ECB block cipher algorithm");
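
Note: ECB is no longer a tfm mode flag but a template; crypto_ecb_alloc() above builds a blkcipher instance when a name such as "ecb(aes)" is requested. A hedged sketch of the caller side, assuming the AES cipher is present (16-byte key and block used for brevity):

static int example_ecb_aes(const u8 *key, u8 *buf)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_setkey(tfm, key, 16);
	if (!err) {
		/* Encrypt one block in place; ECB needs no IV. */
		sg_init_one(&sg, buf, 16);
		err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
	}

	crypto_free_blkcipher(tfm);
	return err;
}
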
diff --git a/crypto/hash.c b/crypto/hash.c
new file mode 100644
index 000000000000..cdec23d885fe
--- /dev/null
+++ b/crypto/hash.c
@@ -0,0 +1,61 @@
1/*
2 * Cryptographic Hash operations.
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
12#include <linux/errno.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
16
17#include "internal.h"
18
19static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg)
20{
21 return alg->cra_ctxsize;
22}
23
24static int crypto_init_hash_ops(struct crypto_tfm *tfm)
25{
26 struct hash_tfm *crt = &tfm->crt_hash;
27 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
28
29 if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
30 return -EINVAL;
31
32 crt->init = alg->init;
33 crt->update = alg->update;
34 crt->final = alg->final;
35 crt->digest = alg->digest;
36 crt->setkey = alg->setkey;
37 crt->digestsize = alg->digestsize;
38
39 return 0;
40}
41
42static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
43 __attribute_used__;
44static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
45{
46 seq_printf(m, "type : hash\n");
47 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
48 seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
49}
50
51const struct crypto_type crypto_hash_type = {
52 .ctxsize = crypto_hash_ctxsize,
53 .init = crypto_init_hash_ops,
54#ifdef CONFIG_PROC_FS
55 .show = crypto_hash_show,
56#endif
57};
58EXPORT_SYMBOL_GPL(crypto_hash_type);
59
60MODULE_LICENSE("GPL");
61MODULE_DESCRIPTION("Generic cryptographic hash type");
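
Note: crypto_hash_type is what lets an algorithm register natively as a hash rather than through the legacy digest path: cra_type points at it and the cra_u.hash ops are copied into crt_hash by crypto_init_hash_ops() above. A rough sketch of such a registration; every example_* name and both sizes are placeholders, not part of this commit:

#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/module.h>

struct example_ctx { u32 state[5]; };	/* placeholder per-tfm state */

static int example_init(struct hash_desc *desc);
static int example_update(struct hash_desc *desc,
			  struct scatterlist *sg, unsigned int nbytes);
static int example_final(struct hash_desc *desc, u8 *out);
static int example_digest(struct hash_desc *desc,
			  struct scatterlist *sg, unsigned int nbytes, u8 *out);
static int example_setkey(struct crypto_hash *tfm,
			  const u8 *key, unsigned int keylen);

static struct crypto_alg example_alg = {
	.cra_name	= "example",
	.cra_flags	= CRYPTO_ALG_TYPE_HASH,
	.cra_blocksize	= 64,	/* digestsize must not exceed this */
	.cra_ctxsize	= sizeof(struct example_ctx),
	.cra_type	= &crypto_hash_type,
	.cra_module	= THIS_MODULE,
	.cra_u		= { .hash = {
		.init		= example_init,
		.update		= example_update,
		.final		= example_final,
		.digest		= example_digest,
		.setkey		= example_setkey,
		.digestsize	= 20,
	} },
};

A module would then just call crypto_register_alg(&example_alg) as the existing digests do.
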
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 46120dee5ada..f403b6946047 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -4,121 +4,249 @@
4 * HMAC: Keyed-Hashing for Message Authentication (RFC2104). 4 * HMAC: Keyed-Hashing for Message Authentication (RFC2104).
5 * 5 *
6 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 6 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
7 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
7 * 8 *
8 * The HMAC implementation is derived from USAGI. 9 * The HMAC implementation is derived from USAGI.
9 * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI 10 * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free 13 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option) 14 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version. 15 * any later version.
15 * 16 *
16 */ 17 */
17#include <linux/crypto.h> 18
18#include <linux/mm.h> 19#include <crypto/algapi.h>
19#include <linux/highmem.h> 20#include <linux/err.h>
20#include <linux/slab.h> 21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
21#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
22#include "internal.h" 25#include <linux/slab.h>
26#include <linux/string.h>
23 27
24static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen) 28struct hmac_ctx {
29 struct crypto_hash *child;
30};
31
32static inline void *align_ptr(void *p, unsigned int align)
25{ 33{
26 struct scatterlist tmp; 34 return (void *)ALIGN((unsigned long)p, align);
27
28 sg_set_buf(&tmp, key, keylen);
29 crypto_digest_digest(tfm, &tmp, 1, key);
30} 35}
31 36
32int crypto_alloc_hmac_block(struct crypto_tfm *tfm) 37static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm)
33{ 38{
34 int ret = 0; 39 return align_ptr(crypto_hash_ctx_aligned(tfm) +
40 crypto_hash_blocksize(tfm) * 2 +
41 crypto_hash_digestsize(tfm), sizeof(void *));
42}
43
44static int hmac_setkey(struct crypto_hash *parent,
45 const u8 *inkey, unsigned int keylen)
46{
47 int bs = crypto_hash_blocksize(parent);
48 int ds = crypto_hash_digestsize(parent);
49 char *ipad = crypto_hash_ctx_aligned(parent);
50 char *opad = ipad + bs;
51 char *digest = opad + bs;
52 struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
53 struct crypto_hash *tfm = ctx->child;
54 unsigned int i;
55
56 if (keylen > bs) {
57 struct hash_desc desc;
58 struct scatterlist tmp;
59 int err;
60
61 desc.tfm = tfm;
62 desc.flags = crypto_hash_get_flags(parent);
63 desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
64 sg_set_buf(&tmp, inkey, keylen);
35 65
36 BUG_ON(!crypto_tfm_alg_blocksize(tfm)); 66 err = crypto_hash_digest(&desc, &tmp, keylen, digest);
37 67 if (err)
38 tfm->crt_digest.dit_hmac_block = kmalloc(crypto_tfm_alg_blocksize(tfm), 68 return err;
39 GFP_KERNEL);
40 if (tfm->crt_digest.dit_hmac_block == NULL)
41 ret = -ENOMEM;
42 69
43 return ret; 70 inkey = digest;
44 71 keylen = ds;
72 }
73
74 memcpy(ipad, inkey, keylen);
75 memset(ipad + keylen, 0, bs - keylen);
76 memcpy(opad, ipad, bs);
77
78 for (i = 0; i < bs; i++) {
79 ipad[i] ^= 0x36;
80 opad[i] ^= 0x5c;
81 }
82
83 return 0;
45} 84}
46 85
47void crypto_free_hmac_block(struct crypto_tfm *tfm) 86static int hmac_init(struct hash_desc *pdesc)
48{ 87{
49 kfree(tfm->crt_digest.dit_hmac_block); 88 struct crypto_hash *parent = pdesc->tfm;
89 int bs = crypto_hash_blocksize(parent);
90 int ds = crypto_hash_digestsize(parent);
91 char *ipad = crypto_hash_ctx_aligned(parent);
92 struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
93 struct hash_desc desc;
94 struct scatterlist tmp;
95
96 desc.tfm = ctx->child;
97 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
98 sg_set_buf(&tmp, ipad, bs);
99
100 return unlikely(crypto_hash_init(&desc)) ?:
101 crypto_hash_update(&desc, &tmp, 1);
50} 102}
51 103
52void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen) 104static int hmac_update(struct hash_desc *pdesc,
105 struct scatterlist *sg, unsigned int nbytes)
53{ 106{
54 unsigned int i; 107 struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
108 struct hash_desc desc;
109
110 desc.tfm = ctx->child;
111 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
112
113 return crypto_hash_update(&desc, sg, nbytes);
114}
115
116static int hmac_final(struct hash_desc *pdesc, u8 *out)
117{
118 struct crypto_hash *parent = pdesc->tfm;
119 int bs = crypto_hash_blocksize(parent);
120 int ds = crypto_hash_digestsize(parent);
121 char *opad = crypto_hash_ctx_aligned(parent) + bs;
122 char *digest = opad + bs;
123 struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
124 struct hash_desc desc;
55 struct scatterlist tmp; 125 struct scatterlist tmp;
56 char *ipad = tfm->crt_digest.dit_hmac_block;
57
58 if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
59 hash_key(tfm, key, *keylen);
60 *keylen = crypto_tfm_alg_digestsize(tfm);
61 }
62 126
63 memset(ipad, 0, crypto_tfm_alg_blocksize(tfm)); 127 desc.tfm = ctx->child;
64 memcpy(ipad, key, *keylen); 128 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
129 sg_set_buf(&tmp, opad, bs + ds);
65 130
66 for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) 131 return unlikely(crypto_hash_final(&desc, digest)) ?:
67 ipad[i] ^= 0x36; 132 crypto_hash_digest(&desc, &tmp, bs + ds, out);
133}
68 134
69 sg_set_buf(&tmp, ipad, crypto_tfm_alg_blocksize(tfm)); 135static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
70 136 unsigned int nbytes, u8 *out)
71 crypto_digest_init(tfm); 137{
72 crypto_digest_update(tfm, &tmp, 1); 138 struct crypto_hash *parent = pdesc->tfm;
139 int bs = crypto_hash_blocksize(parent);
140 int ds = crypto_hash_digestsize(parent);
141 char *ipad = crypto_hash_ctx_aligned(parent);
142 char *opad = ipad + bs;
143 char *digest = opad + bs;
144 struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
145 struct hash_desc desc;
146 struct scatterlist sg1[2];
147 struct scatterlist sg2[1];
148
149 desc.tfm = ctx->child;
150 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
151
152 sg_set_buf(sg1, ipad, bs);
153 sg1[1].page = (void *)sg;
154 sg1[1].length = 0;
155 sg_set_buf(sg2, opad, bs + ds);
156
157 return unlikely(crypto_hash_digest(&desc, sg1, nbytes + bs, digest)) ?:
158 crypto_hash_digest(&desc, sg2, bs + ds, out);
73} 159}
74 160
75void crypto_hmac_update(struct crypto_tfm *tfm, 161static int hmac_init_tfm(struct crypto_tfm *tfm)
76 struct scatterlist *sg, unsigned int nsg)
77{ 162{
78 crypto_digest_update(tfm, sg, nsg); 163 struct crypto_instance *inst = (void *)tfm->__crt_alg;
164 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
165 struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
166
167 tfm = crypto_spawn_tfm(spawn);
168 if (IS_ERR(tfm))
169 return PTR_ERR(tfm);
170
171 ctx->child = crypto_hash_cast(tfm);
172 return 0;
79} 173}
80 174
81void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key, 175static void hmac_exit_tfm(struct crypto_tfm *tfm)
82 unsigned int *keylen, u8 *out)
83{ 176{
84 unsigned int i; 177 struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
85 struct scatterlist tmp; 178 crypto_free_hash(ctx->child);
86 char *opad = tfm->crt_digest.dit_hmac_block; 179}
87
88 if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
89 hash_key(tfm, key, *keylen);
90 *keylen = crypto_tfm_alg_digestsize(tfm);
91 }
92 180
93 crypto_digest_final(tfm, out); 181static void hmac_free(struct crypto_instance *inst)
182{
183 crypto_drop_spawn(crypto_instance_ctx(inst));
184 kfree(inst);
185}
94 186
95 memset(opad, 0, crypto_tfm_alg_blocksize(tfm)); 187static struct crypto_instance *hmac_alloc(void *param, unsigned int len)
96 memcpy(opad, key, *keylen); 188{
97 189 struct crypto_instance *inst;
98 for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) 190 struct crypto_alg *alg;
99 opad[i] ^= 0x5c; 191
192 alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH,
193 CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
194 if (IS_ERR(alg))
195 return ERR_PTR(PTR_ERR(alg));
196
197 inst = crypto_alloc_instance("hmac", alg);
198 if (IS_ERR(inst))
199 goto out_put_alg;
200
201 inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
202 inst->alg.cra_priority = alg->cra_priority;
203 inst->alg.cra_blocksize = alg->cra_blocksize;
204 inst->alg.cra_alignmask = alg->cra_alignmask;
205 inst->alg.cra_type = &crypto_hash_type;
206
207 inst->alg.cra_hash.digestsize =
208 (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
209 CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
210 alg->cra_digest.dia_digestsize;
211
212 inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
213 ALIGN(inst->alg.cra_blocksize * 2 +
214 inst->alg.cra_hash.digestsize,
215 sizeof(void *));
216
217 inst->alg.cra_init = hmac_init_tfm;
218 inst->alg.cra_exit = hmac_exit_tfm;
219
220 inst->alg.cra_hash.init = hmac_init;
221 inst->alg.cra_hash.update = hmac_update;
222 inst->alg.cra_hash.final = hmac_final;
223 inst->alg.cra_hash.digest = hmac_digest;
224 inst->alg.cra_hash.setkey = hmac_setkey;
225
226out_put_alg:
227 crypto_mod_put(alg);
228 return inst;
229}
100 230
101 sg_set_buf(&tmp, opad, crypto_tfm_alg_blocksize(tfm)); 231static struct crypto_template hmac_tmpl = {
232 .name = "hmac",
233 .alloc = hmac_alloc,
234 .free = hmac_free,
235 .module = THIS_MODULE,
236};
102 237
103 crypto_digest_init(tfm); 238static int __init hmac_module_init(void)
104 crypto_digest_update(tfm, &tmp, 1); 239{
105 240 return crypto_register_template(&hmac_tmpl);
106 sg_set_buf(&tmp, out, crypto_tfm_alg_digestsize(tfm));
107
108 crypto_digest_update(tfm, &tmp, 1);
109 crypto_digest_final(tfm, out);
110} 241}
111 242
112void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen, 243static void __exit hmac_module_exit(void)
113 struct scatterlist *sg, unsigned int nsg, u8 *out)
114{ 244{
115 crypto_hmac_init(tfm, key, keylen); 245 crypto_unregister_template(&hmac_tmpl);
116 crypto_hmac_update(tfm, sg, nsg);
117 crypto_hmac_final(tfm, key, keylen, out);
118} 246}
119 247
120EXPORT_SYMBOL_GPL(crypto_hmac_init); 248module_init(hmac_module_init);
121EXPORT_SYMBOL_GPL(crypto_hmac_update); 249module_exit(hmac_module_exit);
122EXPORT_SYMBOL_GPL(crypto_hmac_final);
123EXPORT_SYMBOL_GPL(crypto_hmac);
124 250
251MODULE_LICENSE("GPL");
252MODULE_DESCRIPTION("HMAC hash algorithm");
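
Note: with hmac.c rewritten as a template, the exported crypto_hmac*() helpers disappear; HMAC transforms are obtained by name and keyed through the generic hash API. A minimal sketch, assuming the sha1 driver is loadable:

static int example_hmac_sha1(const u8 *key, unsigned int klen,
			     const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, data, len);

	/* Key once, then digest; setkey precomputes the ipad/opad blocks. */
	err = crypto_hash_setkey(tfm, key, klen) ?:
	      crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(tfm);
	return err;
}
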
diff --git a/crypto/internal.h b/crypto/internal.h
index 959e602909a6..2da6ad4f3593 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -12,19 +12,43 @@
12 */ 12 */
13#ifndef _CRYPTO_INTERNAL_H 13#ifndef _CRYPTO_INTERNAL_H
14#define _CRYPTO_INTERNAL_H 14#define _CRYPTO_INTERNAL_H
15#include <linux/crypto.h> 15
16#include <crypto/algapi.h>
17#include <linux/completion.h>
16#include <linux/mm.h> 18#include <linux/mm.h>
17#include <linux/highmem.h> 19#include <linux/highmem.h>
18#include <linux/interrupt.h> 20#include <linux/interrupt.h>
19#include <linux/init.h> 21#include <linux/init.h>
20#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/module.h>
21#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/notifier.h>
22#include <linux/rwsem.h> 26#include <linux/rwsem.h>
23#include <linux/slab.h> 27#include <linux/slab.h>
24#include <asm/kmap_types.h> 28#include <asm/kmap_types.h>
25 29
30/* Crypto notification events. */
31enum {
32 CRYPTO_MSG_ALG_REQUEST,
33 CRYPTO_MSG_ALG_REGISTER,
34 CRYPTO_MSG_ALG_UNREGISTER,
35 CRYPTO_MSG_TMPL_REGISTER,
36 CRYPTO_MSG_TMPL_UNREGISTER,
37};
38
39struct crypto_instance;
40struct crypto_template;
41
42struct crypto_larval {
43 struct crypto_alg alg;
44 struct crypto_alg *adult;
45 struct completion completion;
46 u32 mask;
47};
48
26extern struct list_head crypto_alg_list; 49extern struct list_head crypto_alg_list;
27extern struct rw_semaphore crypto_alg_sem; 50extern struct rw_semaphore crypto_alg_sem;
51extern struct blocking_notifier_head crypto_chain;
28 52
29extern enum km_type crypto_km_types[]; 53extern enum km_type crypto_km_types[];
30 54
@@ -43,36 +67,33 @@ static inline void crypto_kunmap(void *vaddr, int out)
43 kunmap_atomic(vaddr, crypto_kmap_type(out)); 67 kunmap_atomic(vaddr, crypto_kmap_type(out));
44} 68}
45 69
46static inline void crypto_yield(struct crypto_tfm *tfm) 70static inline void crypto_yield(u32 flags)
47{ 71{
48 if (tfm->crt_flags & CRYPTO_TFM_REQ_MAY_SLEEP) 72 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
49 cond_resched(); 73 cond_resched();
50} 74}
51 75
52#ifdef CONFIG_CRYPTO_HMAC
53int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
54void crypto_free_hmac_block(struct crypto_tfm *tfm);
55#else
56static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
57{
58 return 0;
59}
60
61static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
62{ }
63#endif
64
65#ifdef CONFIG_PROC_FS 76#ifdef CONFIG_PROC_FS
66void __init crypto_init_proc(void); 77void __init crypto_init_proc(void);
78void __exit crypto_exit_proc(void);
67#else 79#else
68static inline void crypto_init_proc(void) 80static inline void crypto_init_proc(void)
69{ } 81{ }
82static inline void crypto_exit_proc(void)
83{ }
70#endif 84#endif
71 85
72static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg, 86static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
73 int flags) 87 int flags)
74{ 88{
75 return alg->cra_ctxsize; 89 unsigned int len = alg->cra_ctxsize;
90
91 if (alg->cra_alignmask) {
92 len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
93 len += alg->cra_digest.dia_digestsize;
94 }
95
96 return len;
76} 97}
77 98
78static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg, 99static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg,
@@ -96,6 +117,10 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg,
96 return alg->cra_ctxsize; 117 return alg->cra_ctxsize;
97} 118}
98 119
120struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
121struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
122struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
123
99int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags); 124int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
100int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags); 125int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
101int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags); 126int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
@@ -108,5 +133,52 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm);
108void crypto_exit_cipher_ops(struct crypto_tfm *tfm); 133void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
109void crypto_exit_compress_ops(struct crypto_tfm *tfm); 134void crypto_exit_compress_ops(struct crypto_tfm *tfm);
110 135
136void crypto_larval_error(const char *name, u32 type, u32 mask);
137
138void crypto_shoot_alg(struct crypto_alg *alg);
139struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags);
140
141int crypto_register_instance(struct crypto_template *tmpl,
142 struct crypto_instance *inst);
143
144int crypto_register_notifier(struct notifier_block *nb);
145int crypto_unregister_notifier(struct notifier_block *nb);
146
147static inline void crypto_alg_put(struct crypto_alg *alg)
148{
149 if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
150 alg->cra_destroy(alg);
151}
152
153static inline int crypto_tmpl_get(struct crypto_template *tmpl)
154{
155 return try_module_get(tmpl->module);
156}
157
158static inline void crypto_tmpl_put(struct crypto_template *tmpl)
159{
160 module_put(tmpl->module);
161}
162
163static inline int crypto_is_larval(struct crypto_alg *alg)
164{
165 return alg->cra_flags & CRYPTO_ALG_LARVAL;
166}
167
168static inline int crypto_is_dead(struct crypto_alg *alg)
169{
170 return alg->cra_flags & CRYPTO_ALG_DEAD;
171}
172
173static inline int crypto_is_moribund(struct crypto_alg *alg)
174{
175 return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING);
176}
177
178static inline int crypto_notify(unsigned long val, void *v)
179{
180 return blocking_notifier_call_chain(&crypto_chain, val, v);
181}
182
111#endif /* _CRYPTO_INTERNAL_H */ 183#endif /* _CRYPTO_INTERNAL_H */
112 184
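
Note: the reworked crypto_digest_ctxsize() pays for the bounce buffer that digest.c's final() now uses. As a worked example (values illustrative): with cra_ctxsize = 100, cra_alignmask = 15 and a 20-byte digest, it reserves roughly ALIGN(100, 16) + 20 = 112 + 20 = 132 bytes, so the aligned tail past the context proper can hold the digest before it is memcpy'd out to an unaligned caller buffer.
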
diff --git a/crypto/khazad.c b/crypto/khazad.c
index d4c9d3657b36..9fa24a2dd6ff 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -755,19 +755,13 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
755}; 755};
756 756
757static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, 757static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
758 unsigned int key_len, u32 *flags) 758 unsigned int key_len)
759{ 759{
760 struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); 760 struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
761 const __be32 *key = (const __be32 *)in_key; 761 const __be32 *key = (const __be32 *)in_key;
762 int r; 762 int r;
763 const u64 *S = T7; 763 const u64 *S = T7;
764 u64 K2, K1; 764 u64 K2, K1;
765
766 if (key_len != 16)
767 {
768 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
769 return -EINVAL;
770 }
771 765
772 /* key is supposed to be 32-bit aligned */ 766 /* key is supposed to be 32-bit aligned */
773 K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); 767 K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index d061da21cfda..094397b48849 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -123,14 +123,13 @@ static void michael_final(struct crypto_tfm *tfm, u8 *out)
123 123
124 124
125static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, 125static int michael_setkey(struct crypto_tfm *tfm, const u8 *key,
126 unsigned int keylen, u32 *flags) 126 unsigned int keylen)
127{ 127{
128 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); 128 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
129 const __le32 *data = (const __le32 *)key; 129 const __le32 *data = (const __le32 *)key;
130 130
131 if (keylen != 8) { 131 if (keylen != 8) {
132 if (flags) 132 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
133 *flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
134 return -EINVAL; 133 return -EINVAL;
135 } 134 }
136 135
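
Note: michael_mic shows the new setkey convention used throughout the series: the u32 *flags output argument is gone, and verdicts are reported in tfm->crt_flags. Roughly, any keyed cipher or digest now follows this shape (the 16-byte length is only illustrative):

static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	if (keylen != 16) {
		/* Flag travels on the tfm instead of an out-parameter. */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	/* ... expand the key into crypto_tfm_ctx(tfm) ... */
	return 0;
}
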
diff --git a/crypto/proc.c b/crypto/proc.c
index c0a5dd7ce2cc..dabce0676f63 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -12,6 +12,8 @@
12 * any later version. 12 * any later version.
13 * 13 *
14 */ 14 */
15
16#include <asm/atomic.h>
15#include <linux/init.h> 17#include <linux/init.h>
16#include <linux/crypto.h> 18#include <linux/crypto.h>
17#include <linux/rwsem.h> 19#include <linux/rwsem.h>
@@ -54,6 +56,7 @@ static int c_show(struct seq_file *m, void *p)
54 seq_printf(m, "driver : %s\n", alg->cra_driver_name); 56 seq_printf(m, "driver : %s\n", alg->cra_driver_name);
55 seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 57 seq_printf(m, "module : %s\n", module_name(alg->cra_module));
56 seq_printf(m, "priority : %d\n", alg->cra_priority); 58 seq_printf(m, "priority : %d\n", alg->cra_priority);
59 seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
57 60
58 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 61 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
59 case CRYPTO_ALG_TYPE_CIPHER: 62 case CRYPTO_ALG_TYPE_CIPHER:
@@ -75,7 +78,10 @@ static int c_show(struct seq_file *m, void *p)
75 seq_printf(m, "type : compression\n"); 78 seq_printf(m, "type : compression\n");
76 break; 79 break;
77 default: 80 default:
78 seq_printf(m, "type : unknown\n"); 81 if (alg->cra_type && alg->cra_type->show)
82 alg->cra_type->show(m, alg);
83 else
84 seq_printf(m, "type : unknown\n");
79 break; 85 break;
80 } 86 }
81 87
@@ -110,3 +116,8 @@ void __init crypto_init_proc(void)
110 if (proc) 116 if (proc)
111 proc->proc_fops = &proc_crypto_ops; 117 proc->proc_fops = &proc_crypto_ops;
112} 118}
119
120void __exit crypto_exit_proc(void)
121{
122 remove_proc_entry("crypto", NULL);
123}
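
Note: the default: branch delegating to cra_type->show is how the new hash type gets its own /proc/crypto lines (crypto_hash_show() in hash.c above). A minimal sketch of how a future type could hook in, modeled on that; the example_* names are invented and a real type would also fill in .ctxsize and .init:

#include <crypto/algapi.h>
#include <linux/seq_file.h>

static void example_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : example\n");
}

static const struct crypto_type example_type = {
	.show = example_show,	/* picked up by the default: branch above */
};
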
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 2953e2cc56f0..35172d3f043b 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -15,9 +15,11 @@
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/module.h>
18#include <linux/pagemap.h> 19#include <linux/pagemap.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
20#include <asm/scatterlist.h> 21#include <linux/scatterlist.h>
22
21#include "internal.h" 23#include "internal.h"
22#include "scatterwalk.h" 24#include "scatterwalk.h"
23 25
@@ -27,88 +29,77 @@ enum km_type crypto_km_types[] = {
27 KM_SOFTIRQ0, 29 KM_SOFTIRQ0,
28 KM_SOFTIRQ1, 30 KM_SOFTIRQ1,
29}; 31};
32EXPORT_SYMBOL_GPL(crypto_km_types);
30 33
31static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) 34static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
32{ 35{
33 if (out) 36 void *src = out ? buf : sgdata;
34 memcpy(sgdata, buf, nbytes); 37 void *dst = out ? sgdata : buf;
35 else 38
36 memcpy(buf, sgdata, nbytes); 39 memcpy(dst, src, nbytes);
37} 40}
38 41
39void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) 42void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
40{ 43{
41 unsigned int rest_of_page;
42
43 walk->sg = sg; 44 walk->sg = sg;
44 45
45 walk->page = sg->page;
46 walk->len_this_segment = sg->length;
47
48 BUG_ON(!sg->length); 46 BUG_ON(!sg->length);
49 47
50 rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
51 walk->len_this_page = min(sg->length, rest_of_page);
52 walk->offset = sg->offset; 48 walk->offset = sg->offset;
53} 49}
50EXPORT_SYMBOL_GPL(scatterwalk_start);
54 51
55void scatterwalk_map(struct scatter_walk *walk, int out) 52void *scatterwalk_map(struct scatter_walk *walk, int out)
56{
57 walk->data = crypto_kmap(walk->page, out) + walk->offset;
58}
59
60static inline void scatterwalk_unmap(struct scatter_walk *walk, int out)
61{ 53{
62 /* walk->data may be pointing the first byte of the next page; 54 return crypto_kmap(scatterwalk_page(walk), out) +
63 however, we know we transfered at least one byte. So, 55 offset_in_page(walk->offset);
64 walk->data - 1 will be a virtual address in the mapped page. */
65 crypto_kunmap(walk->data - 1, out);
66} 56}
57EXPORT_SYMBOL_GPL(scatterwalk_map);
67 58
68static void scatterwalk_pagedone(struct scatter_walk *walk, int out, 59static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
69 unsigned int more) 60 unsigned int more)
70{ 61{
71 if (out) 62 if (out)
72 flush_dcache_page(walk->page); 63 flush_dcache_page(scatterwalk_page(walk));
73 64
74 if (more) { 65 if (more) {
75 walk->len_this_segment -= walk->len_this_page; 66 walk->offset += PAGE_SIZE - 1;
76 67 walk->offset &= PAGE_MASK;
77 if (walk->len_this_segment) { 68 if (walk->offset >= walk->sg->offset + walk->sg->length)
78 walk->page++;
79 walk->len_this_page = min(walk->len_this_segment,
80 (unsigned)PAGE_CACHE_SIZE);
81 walk->offset = 0;
82 }
83 else
84 scatterwalk_start(walk, sg_next(walk->sg)); 69 scatterwalk_start(walk, sg_next(walk->sg));
85 } 70 }
86} 71}
87 72
88void scatterwalk_done(struct scatter_walk *walk, int out, int more) 73void scatterwalk_done(struct scatter_walk *walk, int out, int more)
89{ 74{
90 scatterwalk_unmap(walk, out); 75 if (!offset_in_page(walk->offset) || !more)
91 if (walk->len_this_page == 0 || !more)
92 scatterwalk_pagedone(walk, out, more); 76 scatterwalk_pagedone(walk, out, more);
93} 77}
78EXPORT_SYMBOL_GPL(scatterwalk_done);
94 79
95/* 80void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
96 * Do not call this unless the total length of all of the fragments 81 size_t nbytes, int out)
97 * has been verified as multiple of the block size.
98 */
99int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
100 size_t nbytes, int out)
101{ 82{
102 while (nbytes > walk->len_this_page) { 83 for (;;) {
103 memcpy_dir(buf, walk->data, walk->len_this_page, out); 84 unsigned int len_this_page = scatterwalk_pagelen(walk);
104 buf += walk->len_this_page; 85 u8 *vaddr;
105 nbytes -= walk->len_this_page; 86
87 if (len_this_page > nbytes)
88 len_this_page = nbytes;
89
90 vaddr = scatterwalk_map(walk, out);
91 memcpy_dir(buf, vaddr, len_this_page, out);
92 scatterwalk_unmap(vaddr, out);
93
94 if (nbytes == len_this_page)
95 break;
96
97 buf += len_this_page;
98 nbytes -= len_this_page;
106 99
107 scatterwalk_unmap(walk, out);
108 scatterwalk_pagedone(walk, out, 1); 100 scatterwalk_pagedone(walk, out, 1);
109 scatterwalk_map(walk, out);
110 } 101 }
111 102
112 memcpy_dir(buf, walk->data, nbytes, out); 103 scatterwalk_advance(walk, nbytes);
113 return nbytes;
114} 104}
105EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
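
Note: scatterwalk_map() returning the mapped address, instead of stashing it in the walk, is what lets copychunks pair each map with an immediate unmap. A hedged sketch of a consumer inside crypto/, mirroring how the block-cipher walk code drives it:

#include "scatterwalk.h"

/* Copy nbytes out of a scatterlist into a flat buffer (out == 0 reads). */
static void example_sg_copy(struct scatterlist *sg, void *buf, size_t nbytes)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, 0);
	scatterwalk_done(&walk, 0, 0);	/* finish the walk; no more data */
}
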
diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h
index e79925c474a3..f1592cc2d0f4 100644
--- a/crypto/scatterwalk.h
+++ b/crypto/scatterwalk.h
@@ -14,45 +14,42 @@
14 14
15#ifndef _CRYPTO_SCATTERWALK_H 15#ifndef _CRYPTO_SCATTERWALK_H
16#define _CRYPTO_SCATTERWALK_H 16#define _CRYPTO_SCATTERWALK_H
17
17#include <linux/mm.h> 18#include <linux/mm.h>
18#include <asm/scatterlist.h> 19#include <linux/scatterlist.h>
19 20
20struct scatter_walk { 21#include "internal.h"
21 struct scatterlist *sg;
22 struct page *page;
23 void *data;
24 unsigned int len_this_page;
25 unsigned int len_this_segment;
26 unsigned int offset;
27};
28 22
29/* Define sg_next is an inline routine now in case we want to change
30 scatterlist to a linked list later. */
31static inline struct scatterlist *sg_next(struct scatterlist *sg) 23static inline struct scatterlist *sg_next(struct scatterlist *sg)
32{ 24{
33 return sg + 1; 25 return (++sg)->length ? sg : (void *)sg->page;
34} 26}
35 27
36static inline int scatterwalk_samebuf(struct scatter_walk *walk_in, 28static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
37 struct scatter_walk *walk_out) 29 struct scatter_walk *walk_out)
38{ 30{
39 return walk_in->page == walk_out->page && 31 return !(((walk_in->sg->page - walk_out->sg->page) << PAGE_SHIFT) +
40 walk_in->offset == walk_out->offset; 32 (int)(walk_in->offset - walk_out->offset));
33}
34
35static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
36{
37 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
38 unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
39 return len_this_page > len ? len : len_this_page;
41} 40}
42 41
43static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, 42static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
44 unsigned int nbytes) 43 unsigned int nbytes)
45{ 44{
46 return nbytes > walk->len_this_page ? walk->len_this_page : nbytes; 45 unsigned int len_this_page = scatterwalk_pagelen(walk);
46 return nbytes > len_this_page ? len_this_page : nbytes;
47} 47}
48 48
49static inline void scatterwalk_advance(struct scatter_walk *walk, 49static inline void scatterwalk_advance(struct scatter_walk *walk,
50 unsigned int nbytes) 50 unsigned int nbytes)
51{ 51{
52 walk->data += nbytes;
53 walk->offset += nbytes; 52 walk->offset += nbytes;
54 walk->len_this_page -= nbytes;
55 walk->len_this_segment -= nbytes;
56} 53}
57 54
58static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, 55static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
@@ -61,9 +58,20 @@ static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
61 return !(walk->offset & alignmask); 58 return !(walk->offset & alignmask);
62} 59}
63 60
61static inline struct page *scatterwalk_page(struct scatter_walk *walk)
62{
63 return walk->sg->page + (walk->offset >> PAGE_SHIFT);
64}
65
66static inline void scatterwalk_unmap(void *vaddr, int out)
67{
68 crypto_kunmap(vaddr, out);
69}
70
64void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); 71void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
65int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); 72void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
66void scatterwalk_map(struct scatter_walk *walk, int out); 73 size_t nbytes, int out);
74void *scatterwalk_map(struct scatter_walk *walk, int out);
67void scatterwalk_done(struct scatter_walk *walk, int out, int more); 75void scatterwalk_done(struct scatter_walk *walk, int out, int more);
68 76
69#endif /* _CRYPTO_SCATTERWALK_H */ 77#endif /* _CRYPTO_SCATTERWALK_H */
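
Note: the one-line sg_next() hides a chaining convention: a zero-length entry is not data but a link, its page pointer reused to address the next scatterlist array (hmac_digest() above builds exactly such a chain). A sketch of the convention, assuming first[] holds one data entry followed by the link slot:

/* Make walkers continue from first[] into rest[]. */
static void example_sg_chain(struct scatterlist *first,
			     struct scatterlist *rest)
{
	first[1].page = (void *)rest;	/* link, not a real page */
	first[1].length = 0;		/* zero length marks the link entry */
}
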
diff --git a/crypto/serpent.c b/crypto/serpent.c
index de60cdddbf4a..465d091cd3ec 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -216,7 +216,7 @@ struct serpent_ctx {
216 216
217 217
218static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 218static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
219 unsigned int keylen, u32 *flags) 219 unsigned int keylen)
220{ 220{
221 struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); 221 struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
222 u32 *k = ctx->expkey; 222 u32 *k = ctx->expkey;
@@ -224,13 +224,6 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
224 u32 r0,r1,r2,r3,r4; 224 u32 r0,r1,r2,r3,r4;
225 int i; 225 int i;
226 226
227 if ((keylen < SERPENT_MIN_KEY_SIZE)
228 || (keylen > SERPENT_MAX_KEY_SIZE))
229 {
230 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
231 return -EINVAL;
232 }
233
234 /* Copy key, add padding */ 227 /* Copy key, add padding */
235 228
236 for (i = 0; i < keylen; ++i) 229 for (i = 0; i < keylen; ++i)
@@ -497,21 +490,15 @@ static struct crypto_alg serpent_alg = {
497}; 490};
498 491
499static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key, 492static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
500 unsigned int keylen, u32 *flags) 493 unsigned int keylen)
501{ 494{
502 u8 rev_key[SERPENT_MAX_KEY_SIZE]; 495 u8 rev_key[SERPENT_MAX_KEY_SIZE];
503 int i; 496 int i;
504 497
505 if ((keylen < SERPENT_MIN_KEY_SIZE)
506 || (keylen > SERPENT_MAX_KEY_SIZE)) {
507 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
508 return -EINVAL;
509 }
510
511 for (i = 0; i < keylen; ++i) 498 for (i = 0; i < keylen; ++i)
512 rev_key[keylen - i - 1] = key[i]; 499 rev_key[keylen - i - 1] = key[i];
513 500
514 return serpent_setkey(tfm, rev_key, keylen, flags); 501 return serpent_setkey(tfm, rev_key, keylen);
515} 502}
516 503
517static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 504static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
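
Note: serpent (and khazad above) can drop their keylen range checks because the new cipher layer enforces cia_min_keysize/cia_max_keysize centrally before cia_setkey is ever reached. Roughly, the wrapper in crypto/cipher.c now looks like this; a reconstruction from this series, not quoted from the diff:

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	return cia->cia_setkey(tfm, key, keylen);
}
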
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 6c77b689f87e..1bba551e5b45 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -109,6 +109,7 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
109 109
110static struct crypto_alg alg = { 110static struct crypto_alg alg = {
111 .cra_name = "sha1", 111 .cra_name = "sha1",
112 .cra_driver_name= "sha1-generic",
112 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 113 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
113 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, 114 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
114 .cra_ctxsize = sizeof(struct sha1_ctx), 115 .cra_ctxsize = sizeof(struct sha1_ctx),
@@ -137,3 +138,5 @@ module_exit(fini);
137 138
138MODULE_LICENSE("GPL"); 139MODULE_LICENSE("GPL");
139MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 140MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
141
142MODULE_ALIAS("sha1-generic");
diff --git a/crypto/sha256.c b/crypto/sha256.c
index bc71d85a7d02..716195bb54f2 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -309,6 +309,7 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
309 309
310static struct crypto_alg alg = { 310static struct crypto_alg alg = {
311 .cra_name = "sha256", 311 .cra_name = "sha256",
312 .cra_driver_name= "sha256-generic",
312 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 313 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
313 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, 314 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
314 .cra_ctxsize = sizeof(struct sha256_ctx), 315 .cra_ctxsize = sizeof(struct sha256_ctx),
@@ -337,3 +338,5 @@ module_exit(fini);
337 338
338MODULE_LICENSE("GPL"); 339MODULE_LICENSE("GPL");
339MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); 340MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
341
342MODULE_ALIAS("sha256-generic");
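
Note: the cra_driver_name/MODULE_ALIAS pairs give each generic implementation a stable second name, so the software version can be requested and autoloaded even when an accelerated "sha1" or "sha256" outranks it by priority. Presumably the lookup path honours the driver name directly, e.g.:

	tfm = crypto_alloc_hash("sha1-generic", 0, CRYPTO_ALG_ASYNC);
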
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e52f56c5bd5e..83307420d31c 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -17,6 +17,7 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/err.h>
20#include <linux/init.h> 21#include <linux/init.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -54,8 +55,6 @@
54*/ 55*/
55#define ENCRYPT 1 56#define ENCRYPT 1
56#define DECRYPT 0 57#define DECRYPT 0
57#define MODE_ECB 1
58#define MODE_CBC 0
59 58
60static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; 59static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
61 60
@@ -89,9 +88,11 @@ static void test_hash(char *algo, struct hash_testvec *template,
89 unsigned int i, j, k, temp; 88 unsigned int i, j, k, temp;
90 struct scatterlist sg[8]; 89 struct scatterlist sg[8];
91 char result[64]; 90 char result[64];
92 struct crypto_tfm *tfm; 91 struct crypto_hash *tfm;
92 struct hash_desc desc;
93 struct hash_testvec *hash_tv; 93 struct hash_testvec *hash_tv;
94 unsigned int tsize; 94 unsigned int tsize;
95 int ret;
95 96
96 printk("\ntesting %s\n", algo); 97 printk("\ntesting %s\n", algo);
97 98
@@ -105,30 +106,42 @@ static void test_hash(char *algo, struct hash_testvec *template,
105 106
106 memcpy(tvmem, template, tsize); 107 memcpy(tvmem, template, tsize);
107 hash_tv = (void *)tvmem; 108 hash_tv = (void *)tvmem;
108 tfm = crypto_alloc_tfm(algo, 0); 109
109 if (tfm == NULL) { 110 tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
110 printk("failed to load transform for %s\n", algo); 111 if (IS_ERR(tfm)) {
112 printk("failed to load transform for %s: %ld\n", algo,
113 PTR_ERR(tfm));
111 return; 114 return;
112 } 115 }
113 116
117 desc.tfm = tfm;
118 desc.flags = 0;
119
114 for (i = 0; i < tcount; i++) { 120 for (i = 0; i < tcount; i++) {
115 printk("test %u:\n", i + 1); 121 printk("test %u:\n", i + 1);
116 memset(result, 0, 64); 122 memset(result, 0, 64);
117 123
118 sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize); 124 sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize);
119 125
120 crypto_digest_init(tfm); 126 if (hash_tv[i].ksize) {
121 if (tfm->crt_u.digest.dit_setkey) { 127 ret = crypto_hash_setkey(tfm, hash_tv[i].key,
122 crypto_digest_setkey(tfm, hash_tv[i].key, 128 hash_tv[i].ksize);
123 hash_tv[i].ksize); 129 if (ret) {
130 printk("setkey() failed ret=%d\n", ret);
131 goto out;
132 }
133 }
134
135 ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize, result);
136 if (ret) {
137 printk("digest () failed ret=%d\n", ret);
138 goto out;
124 } 139 }
125 crypto_digest_update(tfm, sg, 1);
126 crypto_digest_final(tfm, result);
127 140
128 hexdump(result, crypto_tfm_alg_digestsize(tfm)); 141 hexdump(result, crypto_hash_digestsize(tfm));
129 printk("%s\n", 142 printk("%s\n",
130 memcmp(result, hash_tv[i].digest, 143 memcmp(result, hash_tv[i].digest,
131 crypto_tfm_alg_digestsize(tfm)) ? 144 crypto_hash_digestsize(tfm)) ?
132 "fail" : "pass"); 145 "fail" : "pass");
133 } 146 }
134 147
@@ -154,127 +167,56 @@ static void test_hash(char *algo, struct hash_testvec *template,
154 hash_tv[i].tap[k]); 167 hash_tv[i].tap[k]);
155 } 168 }
156 169
157 crypto_digest_digest(tfm, sg, hash_tv[i].np, result); 170 if (hash_tv[i].ksize) {
158 171 ret = crypto_hash_setkey(tfm, hash_tv[i].key,
159 hexdump(result, crypto_tfm_alg_digestsize(tfm)); 172 hash_tv[i].ksize);
160 printk("%s\n",
161 memcmp(result, hash_tv[i].digest,
162 crypto_tfm_alg_digestsize(tfm)) ?
163 "fail" : "pass");
164 }
165 }
166
167 crypto_free_tfm(tfm);
168}
169
170
171#ifdef CONFIG_CRYPTO_HMAC
172
173static void test_hmac(char *algo, struct hmac_testvec *template,
174 unsigned int tcount)
175{
176 unsigned int i, j, k, temp;
177 struct scatterlist sg[8];
178 char result[64];
179 struct crypto_tfm *tfm;
180 struct hmac_testvec *hmac_tv;
181 unsigned int tsize, klen;
182
183 tfm = crypto_alloc_tfm(algo, 0);
184 if (tfm == NULL) {
185 printk("failed to load transform for %s\n", algo);
186 return;
187 }
188
189 printk("\ntesting hmac_%s\n", algo);
190
191 tsize = sizeof(struct hmac_testvec);
192 tsize *= tcount;
193 if (tsize > TVMEMSIZE) {
194 printk("template (%u) too big for tvmem (%u)\n", tsize,
195 TVMEMSIZE);
196 goto out;
197 }
198
199 memcpy(tvmem, template, tsize);
200 hmac_tv = (void *)tvmem;
201
202 for (i = 0; i < tcount; i++) {
203 printk("test %u:\n", i + 1);
204 memset(result, 0, sizeof (result));
205
206 klen = hmac_tv[i].ksize;
207 sg_set_buf(&sg[0], hmac_tv[i].plaintext, hmac_tv[i].psize);
208
209 crypto_hmac(tfm, hmac_tv[i].key, &klen, sg, 1, result);
210 173
211 hexdump(result, crypto_tfm_alg_digestsize(tfm)); 174 if (ret) {
212 printk("%s\n", 175 printk("setkey() failed ret=%d\n", ret);
213 memcmp(result, hmac_tv[i].digest, 176 goto out;
214 crypto_tfm_alg_digestsize(tfm)) ? "fail" : 177 }
215 "pass");
216 }
217
218 printk("\ntesting hmac_%s across pages\n", algo);
219
220 memset(xbuf, 0, XBUFSIZE);
221
222 j = 0;
223 for (i = 0; i < tcount; i++) {
224 if (hmac_tv[i].np) {
225 j++;
226 printk("test %u:\n",j);
227 memset(result, 0, 64);
228
229 temp = 0;
230 klen = hmac_tv[i].ksize;
231 for (k = 0; k < hmac_tv[i].np; k++) {
232 memcpy(&xbuf[IDX[k]],
233 hmac_tv[i].plaintext + temp,
234 hmac_tv[i].tap[k]);
235 temp += hmac_tv[i].tap[k];
236 sg_set_buf(&sg[k], &xbuf[IDX[k]],
237 hmac_tv[i].tap[k]);
238 } 178 }
239 179
240 crypto_hmac(tfm, hmac_tv[i].key, &klen, sg, 180 ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize,
241 hmac_tv[i].np, result); 181 result);
242 hexdump(result, crypto_tfm_alg_digestsize(tfm)); 182 if (ret) {
183 printk("digest () failed ret=%d\n", ret);
184 goto out;
185 }
243 186
187 hexdump(result, crypto_hash_digestsize(tfm));
244 printk("%s\n", 188 printk("%s\n",
245 memcmp(result, hmac_tv[i].digest, 189 memcmp(result, hash_tv[i].digest,
246 crypto_tfm_alg_digestsize(tfm)) ? 190 crypto_hash_digestsize(tfm)) ?
247 "fail" : "pass"); 191 "fail" : "pass");
248 } 192 }
249 } 193 }
194
250out: 195out:
251 crypto_free_tfm(tfm); 196 crypto_free_hash(tfm);
252} 197}
253 198
254#endif /* CONFIG_CRYPTO_HMAC */ 199static void test_cipher(char *algo, int enc,
255
256static void test_cipher(char *algo, int mode, int enc,
257 struct cipher_testvec *template, unsigned int tcount) 200 struct cipher_testvec *template, unsigned int tcount)
258{ 201{
259 unsigned int ret, i, j, k, temp; 202 unsigned int ret, i, j, k, temp;
260 unsigned int tsize; 203 unsigned int tsize;
204 unsigned int iv_len;
205 unsigned int len;
261 char *q; 206 char *q;
262 struct crypto_tfm *tfm; 207 struct crypto_blkcipher *tfm;
263 char *key; 208 char *key;
264 struct cipher_testvec *cipher_tv; 209 struct cipher_testvec *cipher_tv;
210 struct blkcipher_desc desc;
265 struct scatterlist sg[8]; 211 struct scatterlist sg[8];
266 const char *e, *m; 212 const char *e;
267 213
268 if (enc == ENCRYPT) 214 if (enc == ENCRYPT)
269 e = "encryption"; 215 e = "encryption";
270 else 216 else
271 e = "decryption"; 217 e = "decryption";
272 if (mode == MODE_ECB)
273 m = "ECB";
274 else
275 m = "CBC";
276 218
277 printk("\ntesting %s %s %s\n", algo, m, e); 219 printk("\ntesting %s %s\n", algo, e);
278 220
279 tsize = sizeof (struct cipher_testvec); 221 tsize = sizeof (struct cipher_testvec);
280 tsize *= tcount; 222 tsize *= tcount;
@@ -288,15 +230,15 @@ static void test_cipher(char *algo, int mode, int enc,
288 memcpy(tvmem, template, tsize); 230 memcpy(tvmem, template, tsize);
289 cipher_tv = (void *)tvmem; 231 cipher_tv = (void *)tvmem;
290 232
291 if (mode) 233 tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
292 tfm = crypto_alloc_tfm(algo, 0);
293 else
294 tfm = crypto_alloc_tfm(algo, CRYPTO_TFM_MODE_CBC);
295 234
296 if (tfm == NULL) { 235 if (IS_ERR(tfm)) {
297 printk("failed to load transform for %s %s\n", algo, m); 236 printk("failed to load transform for %s: %ld\n", algo,
237 PTR_ERR(tfm));
298 return; 238 return;
299 } 239 }
240 desc.tfm = tfm;
241 desc.flags = 0;
300 242
301 j = 0; 243 j = 0;
302 for (i = 0; i < tcount; i++) { 244 for (i = 0; i < tcount; i++) {
@@ -305,14 +247,17 @@ static void test_cipher(char *algo, int mode, int enc,
305 printk("test %u (%d bit key):\n", 247 printk("test %u (%d bit key):\n",
306 j, cipher_tv[i].klen * 8); 248 j, cipher_tv[i].klen * 8);
307 249
308 tfm->crt_flags = 0; 250 crypto_blkcipher_clear_flags(tfm, ~0);
309 if (cipher_tv[i].wk) 251 if (cipher_tv[i].wk)
310 tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY; 252 crypto_blkcipher_set_flags(
253 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
311 key = cipher_tv[i].key; 254 key = cipher_tv[i].key;
312 255
313 ret = crypto_cipher_setkey(tfm, key, cipher_tv[i].klen); 256 ret = crypto_blkcipher_setkey(tfm, key,
257 cipher_tv[i].klen);
314 if (ret) { 258 if (ret) {
315 printk("setkey() failed flags=%x\n", tfm->crt_flags); 259 printk("setkey() failed flags=%x\n",
260 crypto_blkcipher_get_flags(tfm));
316 261
317 if (!cipher_tv[i].fail) 262 if (!cipher_tv[i].fail)
318 goto out; 263 goto out;
@@ -321,19 +266,19 @@ static void test_cipher(char *algo, int mode, int enc,
321 sg_set_buf(&sg[0], cipher_tv[i].input, 266 sg_set_buf(&sg[0], cipher_tv[i].input,
322 cipher_tv[i].ilen); 267 cipher_tv[i].ilen);
323 268
324 if (!mode) { 269 iv_len = crypto_blkcipher_ivsize(tfm);
325 crypto_cipher_set_iv(tfm, cipher_tv[i].iv, 270 if (iv_len)
326 crypto_tfm_alg_ivsize(tfm)); 271 crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv,
327 } 272 iv_len);
328
329 if (enc)
330 ret = crypto_cipher_encrypt(tfm, sg, sg, cipher_tv[i].ilen);
331 else
332 ret = crypto_cipher_decrypt(tfm, sg, sg, cipher_tv[i].ilen);
333 273
274 len = cipher_tv[i].ilen;
275 ret = enc ?
276 crypto_blkcipher_encrypt(&desc, sg, sg, len) :
277 crypto_blkcipher_decrypt(&desc, sg, sg, len);
334 278
335 if (ret) { 279 if (ret) {
336 printk("%s () failed flags=%x\n", e, tfm->crt_flags); 280 printk("%s () failed flags=%x\n", e,
281 desc.flags);
337 goto out; 282 goto out;
338 } 283 }
339 284
@@ -346,7 +291,7 @@ static void test_cipher(char *algo, int mode, int enc,
346 } 291 }
347 } 292 }
348 293
349 printk("\ntesting %s %s %s across pages (chunking)\n", algo, m, e); 294 printk("\ntesting %s %s across pages (chunking)\n", algo, e);
350 memset(xbuf, 0, XBUFSIZE); 295 memset(xbuf, 0, XBUFSIZE);
351 296
352 j = 0; 297 j = 0;
@@ -356,14 +301,17 @@ static void test_cipher(char *algo, int mode, int enc,
356 printk("test %u (%d bit key):\n", 301 printk("test %u (%d bit key):\n",
357 j, cipher_tv[i].klen * 8); 302 j, cipher_tv[i].klen * 8);
358 303
359 tfm->crt_flags = 0; 304 crypto_blkcipher_clear_flags(tfm, ~0);
360 if (cipher_tv[i].wk) 305 if (cipher_tv[i].wk)
361 tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY; 306 crypto_blkcipher_set_flags(
307 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
362 key = cipher_tv[i].key; 308 key = cipher_tv[i].key;
363 309
364 ret = crypto_cipher_setkey(tfm, key, cipher_tv[i].klen); 310 ret = crypto_blkcipher_setkey(tfm, key,
311 cipher_tv[i].klen);
365 if (ret) { 312 if (ret) {
366 printk("setkey() failed flags=%x\n", tfm->crt_flags); 313 printk("setkey() failed flags=%x\n",
314 crypto_blkcipher_get_flags(tfm));
367 315
368 if (!cipher_tv[i].fail) 316 if (!cipher_tv[i].fail)
369 goto out; 317 goto out;
@@ -379,18 +327,19 @@ static void test_cipher(char *algo, int mode, int enc,
379 cipher_tv[i].tap[k]); 327 cipher_tv[i].tap[k]);
380 } 328 }
381 329
382 if (!mode) { 330 iv_len = crypto_blkcipher_ivsize(tfm);
383 crypto_cipher_set_iv(tfm, cipher_tv[i].iv, 331 if (iv_len)
384 crypto_tfm_alg_ivsize(tfm)); 332 crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv,
385 } 333 iv_len);
386 334
387 if (enc) 335 len = cipher_tv[i].ilen;
388 ret = crypto_cipher_encrypt(tfm, sg, sg, cipher_tv[i].ilen); 336 ret = enc ?
389 else 337 crypto_blkcipher_encrypt(&desc, sg, sg, len) :
390 ret = crypto_cipher_decrypt(tfm, sg, sg, cipher_tv[i].ilen); 338 crypto_blkcipher_decrypt(&desc, sg, sg, len);
391 339
392 if (ret) { 340 if (ret) {
393 printk("%s () failed flags=%x\n", e, tfm->crt_flags); 341 printk("%s () failed flags=%x\n", e,
342 desc.flags);
394 goto out; 343 goto out;
395 } 344 }
396 345
@@ -409,10 +358,10 @@ static void test_cipher(char *algo, int mode, int enc,
409 } 358 }
410 359
411out: 360out:
412 crypto_free_tfm(tfm); 361 crypto_free_blkcipher(tfm);
413} 362}
414 363
415static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p, 364static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p,
416 int blen, int sec) 365 int blen, int sec)
417{ 366{
418 struct scatterlist sg[1]; 367 struct scatterlist sg[1];
@@ -425,9 +374,9 @@ static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p,
425 for (start = jiffies, end = start + sec * HZ, bcount = 0; 374 for (start = jiffies, end = start + sec * HZ, bcount = 0;
426 time_before(jiffies, end); bcount++) { 375 time_before(jiffies, end); bcount++) {
427 if (enc) 376 if (enc)
428 ret = crypto_cipher_encrypt(tfm, sg, sg, blen); 377 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
429 else 378 else
430 ret = crypto_cipher_decrypt(tfm, sg, sg, blen); 379 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
431 380
432 if (ret) 381 if (ret)
433 return ret; 382 return ret;
@@ -438,7 +387,7 @@ static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p,
438 return 0; 387 return 0;
439} 388}
440 389
441static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p, 390static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p,
442 int blen) 391 int blen)
443{ 392{
444 struct scatterlist sg[1]; 393 struct scatterlist sg[1];
@@ -454,9 +403,9 @@ static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p,
454 /* Warm-up run. */ 403 /* Warm-up run. */
455 for (i = 0; i < 4; i++) { 404 for (i = 0; i < 4; i++) {
456 if (enc) 405 if (enc)
457 ret = crypto_cipher_encrypt(tfm, sg, sg, blen); 406 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
458 else 407 else
459 ret = crypto_cipher_decrypt(tfm, sg, sg, blen); 408 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
460 409
461 if (ret) 410 if (ret)
462 goto out; 411 goto out;
@@ -468,9 +417,9 @@ static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p,
468 417
469 start = get_cycles(); 418 start = get_cycles();
470 if (enc) 419 if (enc)
471 ret = crypto_cipher_encrypt(tfm, sg, sg, blen); 420 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
472 else 421 else
473 ret = crypto_cipher_decrypt(tfm, sg, sg, blen); 422 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
474 end = get_cycles(); 423 end = get_cycles();
475 424
476 if (ret) 425 if (ret)
@@ -490,35 +439,32 @@ out:
490 return ret; 439 return ret;
491} 440}
492 441
493static void test_cipher_speed(char *algo, int mode, int enc, unsigned int sec, 442static void test_cipher_speed(char *algo, int enc, unsigned int sec,
494 struct cipher_testvec *template, 443 struct cipher_testvec *template,
495 unsigned int tcount, struct cipher_speed *speed) 444 unsigned int tcount, struct cipher_speed *speed)
496{ 445{
497 unsigned int ret, i, j, iv_len; 446 unsigned int ret, i, j, iv_len;
498 unsigned char *key, *p, iv[128]; 447 unsigned char *key, *p, iv[128];
499 struct crypto_tfm *tfm; 448 struct crypto_blkcipher *tfm;
500 const char *e, *m; 449 struct blkcipher_desc desc;
450 const char *e;
501 451
502 if (enc == ENCRYPT) 452 if (enc == ENCRYPT)
503 e = "encryption"; 453 e = "encryption";
504 else 454 else
505 e = "decryption"; 455 e = "decryption";
506 if (mode == MODE_ECB)
507 m = "ECB";
508 else
509 m = "CBC";
510 456
511 printk("\ntesting speed of %s %s %s\n", algo, m, e); 457 printk("\ntesting speed of %s %s\n", algo, e);
512 458
513 if (mode) 459 tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
514 tfm = crypto_alloc_tfm(algo, 0);
515 else
516 tfm = crypto_alloc_tfm(algo, CRYPTO_TFM_MODE_CBC);
517 460
518 if (tfm == NULL) { 461 if (IS_ERR(tfm)) {
519 printk("failed to load transform for %s %s\n", algo, m); 462 printk("failed to load transform for %s: %ld\n", algo,
463 PTR_ERR(tfm));
520 return; 464 return;
521 } 465 }
466 desc.tfm = tfm;
467 desc.flags = 0;
522 468
523 for (i = 0; speed[i].klen != 0; i++) { 469 for (i = 0; speed[i].klen != 0; i++) {
524 if ((speed[i].blen + speed[i].klen) > TVMEMSIZE) { 470 if ((speed[i].blen + speed[i].klen) > TVMEMSIZE) {
@@ -542,125 +488,231 @@ static void test_cipher_speed(char *algo, int mode, int enc, unsigned int sec,
542 } 488 }
543 p = (unsigned char *)tvmem + speed[i].klen; 489 p = (unsigned char *)tvmem + speed[i].klen;
544 490
545 ret = crypto_cipher_setkey(tfm, key, speed[i].klen); 491 ret = crypto_blkcipher_setkey(tfm, key, speed[i].klen);
546 if (ret) { 492 if (ret) {
547 printk("setkey() failed flags=%x\n", tfm->crt_flags); 493 printk("setkey() failed flags=%x\n",
494 crypto_blkcipher_get_flags(tfm));
548 goto out; 495 goto out;
549 } 496 }
550 497
551 if (!mode) { 498 iv_len = crypto_blkcipher_ivsize(tfm);
552 iv_len = crypto_tfm_alg_ivsize(tfm); 499 if (iv_len) {
553 memset(&iv, 0xff, iv_len); 500 memset(&iv, 0xff, iv_len);
554 crypto_cipher_set_iv(tfm, iv, iv_len); 501 crypto_blkcipher_set_iv(tfm, iv, iv_len);
555 } 502 }
556 503
557 if (sec) 504 if (sec)
558 ret = test_cipher_jiffies(tfm, enc, p, speed[i].blen, 505 ret = test_cipher_jiffies(&desc, enc, p, speed[i].blen,
559 sec); 506 sec);
560 else 507 else
561 ret = test_cipher_cycles(tfm, enc, p, speed[i].blen); 508 ret = test_cipher_cycles(&desc, enc, p, speed[i].blen);
562 509
563 if (ret) { 510 if (ret) {
564 printk("%s() failed flags=%x\n", e, tfm->crt_flags); 511 printk("%s() failed flags=%x\n", e, desc.flags);
565 break; 512 break;
566 } 513 }
567 } 514 }
568 515
569out: 516out:
570 crypto_free_tfm(tfm); 517 crypto_free_blkcipher(tfm);
571} 518}
572 519
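
Two conventions change at allocation time in the hunk above. First, lookup failures now come back ERR_PTR-encoded rather than as NULL, hence the IS_ERR()/PTR_ERR() handling. Second, the old mode flag is gone; a (type, mask) pair constrains the lookup instead. A short sketch of the assumed semantics:

    /* type = 0, mask = CRYPTO_ALG_ASYNC: only accept algorithms whose
     * ASYNC bit is clear, i.e. synchronous blkciphers -- the only kind
     * this synchronous test harness can drive directly. */
    tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
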
573static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen, 520static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen,
574 int plen, char *out, int sec) 521 char *out, int sec)
522{
523 struct scatterlist sg[1];
524 unsigned long start, end;
525 int bcount;
526 int ret;
527
528 for (start = jiffies, end = start + sec * HZ, bcount = 0;
529 time_before(jiffies, end); bcount++) {
530 sg_set_buf(sg, p, blen);
531 ret = crypto_hash_digest(desc, sg, blen, out);
532 if (ret)
533 return ret;
534 }
535
536 printk("%6u opers/sec, %9lu bytes/sec\n",
537 bcount / sec, ((long)bcount * blen) / sec);
538
539 return 0;
540}
541
542static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen,
543 int plen, char *out, int sec)
575{ 544{
576 struct scatterlist sg[1]; 545 struct scatterlist sg[1];
577 unsigned long start, end; 546 unsigned long start, end;
578 int bcount, pcount; 547 int bcount, pcount;
548 int ret;
549
550 if (plen == blen)
551 return test_hash_jiffies_digest(desc, p, blen, out, sec);
579 552
580 for (start = jiffies, end = start + sec * HZ, bcount = 0; 553 for (start = jiffies, end = start + sec * HZ, bcount = 0;
581 time_before(jiffies, end); bcount++) { 554 time_before(jiffies, end); bcount++) {
582 crypto_digest_init(tfm); 555 ret = crypto_hash_init(desc);
556 if (ret)
557 return ret;
583 for (pcount = 0; pcount < blen; pcount += plen) { 558 for (pcount = 0; pcount < blen; pcount += plen) {
584 sg_set_buf(sg, p + pcount, plen); 559 sg_set_buf(sg, p + pcount, plen);
585 crypto_digest_update(tfm, sg, 1); 560 ret = crypto_hash_update(desc, sg, plen);
561 if (ret)
562 return ret;
586 } 563 }
587 /* we assume there is enough space in 'out' for the result */ 564 /* we assume there is enough space in 'out' for the result */
588 crypto_digest_final(tfm, out); 565 ret = crypto_hash_final(desc, out);
566 if (ret)
567 return ret;
589 } 568 }
590 569
591 printk("%6u opers/sec, %9lu bytes/sec\n", 570 printk("%6u opers/sec, %9lu bytes/sec\n",
592 bcount / sec, ((long)bcount * blen) / sec); 571 bcount / sec, ((long)bcount * blen) / sec);
593 572
594 return; 573 return 0;
574}
575
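
test_hash_jiffies() forwards to test_hash_jiffies_digest() when plen == blen: hashing the whole buffer in one sweep can use the one-shot crypto_hash_digest() instead of the init/update/final sequence, so the same work is measured with less call overhead. The two equivalent paths, sketched with an assumed scatterlist and output buffer:

    struct scatterlist sg[1];
    u8 out[64];
    int ret;

    sg_init_one(sg, p, len);

    /* One-shot: what test_hash_jiffies_digest() times. */
    ret = crypto_hash_digest(&desc, sg, len, out);

    /* Chunked: what the plen < blen path times (single chunk shown). */
    ret = crypto_hash_init(&desc);
    if (!ret)
            ret = crypto_hash_update(&desc, sg, len);
    if (!ret)
            ret = crypto_hash_final(&desc, out);
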
576static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen,
577 char *out)
578{
579 struct scatterlist sg[1];
580 unsigned long cycles = 0;
581 int i;
582 int ret;
583
584 local_bh_disable();
585 local_irq_disable();
586
587 /* Warm-up run. */
588 for (i = 0; i < 4; i++) {
589 sg_set_buf(sg, p, blen);
590 ret = crypto_hash_digest(desc, sg, blen, out);
591 if (ret)
592 goto out;
593 }
594
595 /* The real thing. */
596 for (i = 0; i < 8; i++) {
597 cycles_t start, end;
598
599 start = get_cycles();
600
601 sg_set_buf(sg, p, blen);
602 ret = crypto_hash_digest(desc, sg, blen, out);
603 if (ret)
604 goto out;
605
606 end = get_cycles();
607
608 cycles += end - start;
609 }
610
611out:
612 local_irq_enable();
613 local_bh_enable();
614
615 if (ret)
616 return ret;
617
618 printk("%6lu cycles/operation, %4lu cycles/byte\n",
619 cycles / 8, cycles / (8 * blen));
620
621 return 0;
595} 622}
596 623
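
The *_cycles helpers (ciphers earlier, hashes here) all share one measurement recipe: mask softirqs and interrupts so nothing perturbs the counter, run four untimed warm-up passes to settle caches and branch predictors, then average get_cycles() deltas across eight timed passes. The skeleton, with do_one_op() standing in for whatever encrypt/digest call is being measured:

    local_bh_disable();
    local_irq_disable();

    for (i = 0; i < 4; i++)         /* warm-up, untimed */
            do_one_op();

    for (i = 0; i < 8; i++) {       /* timed runs */
            cycles_t start = get_cycles();

            do_one_op();
            cycles += get_cycles() - start;
    }

    local_irq_enable();
    local_bh_enable();

    printk("%6lu cycles/operation, %4lu cycles/byte\n",
           cycles / 8, cycles / (8 * blen));
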
597static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen, 624static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
598 int plen, char *out) 625 int plen, char *out)
599{ 626{
600 struct scatterlist sg[1]; 627 struct scatterlist sg[1];
601 unsigned long cycles = 0; 628 unsigned long cycles = 0;
602 int i, pcount; 629 int i, pcount;
630 int ret;
631
632 if (plen == blen)
633 return test_hash_cycles_digest(desc, p, blen, out);
603 634
604 local_bh_disable(); 635 local_bh_disable();
605 local_irq_disable(); 636 local_irq_disable();
606 637
607 /* Warm-up run. */ 638 /* Warm-up run. */
608 for (i = 0; i < 4; i++) { 639 for (i = 0; i < 4; i++) {
609 crypto_digest_init(tfm); 640 ret = crypto_hash_init(desc);
641 if (ret)
642 goto out;
610 for (pcount = 0; pcount < blen; pcount += plen) { 643 for (pcount = 0; pcount < blen; pcount += plen) {
611 sg_set_buf(sg, p + pcount, plen); 644 sg_set_buf(sg, p + pcount, plen);
612 crypto_digest_update(tfm, sg, 1); 645 ret = crypto_hash_update(desc, sg, plen);
646 if (ret)
647 goto out;
613 } 648 }
 614 crypto_digest_final(tfm, out); 649 ret = crypto_hash_final(desc, out);
650 if (ret)
651 goto out;
615 } 652 }
616 653
617 /* The real thing. */ 654 /* The real thing. */
618 for (i = 0; i < 8; i++) { 655 for (i = 0; i < 8; i++) {
619 cycles_t start, end; 656 cycles_t start, end;
620 657
621 crypto_digest_init(tfm);
622
623 start = get_cycles(); 658 start = get_cycles();
624 659
660 ret = crypto_hash_init(desc);
661 if (ret)
662 goto out;
625 for (pcount = 0; pcount < blen; pcount += plen) { 663 for (pcount = 0; pcount < blen; pcount += plen) {
626 sg_set_buf(sg, p + pcount, plen); 664 sg_set_buf(sg, p + pcount, plen);
627 crypto_digest_update(tfm, sg, 1); 665 ret = crypto_hash_update(desc, sg, plen);
666 if (ret)
667 goto out;
628 } 668 }
629 crypto_digest_final(tfm, out); 669 ret = crypto_hash_final(desc, out);
670 if (ret)
671 goto out;
630 672
631 end = get_cycles(); 673 end = get_cycles();
632 674
633 cycles += end - start; 675 cycles += end - start;
634 } 676 }
635 677
678out:
636 local_irq_enable(); 679 local_irq_enable();
637 local_bh_enable(); 680 local_bh_enable();
638 681
682 if (ret)
683 return ret;
684
639 printk("%6lu cycles/operation, %4lu cycles/byte\n", 685 printk("%6lu cycles/operation, %4lu cycles/byte\n",
640 cycles / 8, cycles / (8 * blen)); 686 cycles / 8, cycles / (8 * blen));
641 687
642 return; 688 return 0;
643} 689}
644 690
645static void test_digest_speed(char *algo, unsigned int sec, 691static void test_hash_speed(char *algo, unsigned int sec,
646 struct digest_speed *speed) 692 struct hash_speed *speed)
647{ 693{
648 struct crypto_tfm *tfm; 694 struct crypto_hash *tfm;
695 struct hash_desc desc;
649 char output[1024]; 696 char output[1024];
650 int i; 697 int i;
698 int ret;
651 699
652 printk("\ntesting speed of %s\n", algo); 700 printk("\ntesting speed of %s\n", algo);
653 701
654 tfm = crypto_alloc_tfm(algo, 0); 702 tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
655 703
656 if (tfm == NULL) { 704 if (IS_ERR(tfm)) {
657 printk("failed to load transform for %s\n", algo); 705 printk("failed to load transform for %s: %ld\n", algo,
706 PTR_ERR(tfm));
658 return; 707 return;
659 } 708 }
660 709
661 if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) { 710 desc.tfm = tfm;
711 desc.flags = 0;
712
713 if (crypto_hash_digestsize(tfm) > sizeof(output)) {
662 printk("digestsize(%u) > outputbuffer(%zu)\n", 714 printk("digestsize(%u) > outputbuffer(%zu)\n",
663 crypto_tfm_alg_digestsize(tfm), sizeof(output)); 715 crypto_hash_digestsize(tfm), sizeof(output));
664 goto out; 716 goto out;
665 } 717 }
666 718
@@ -677,20 +729,27 @@ static void test_digest_speed(char *algo, unsigned int sec,
677 memset(tvmem, 0xff, speed[i].blen); 729 memset(tvmem, 0xff, speed[i].blen);
678 730
679 if (sec) 731 if (sec)
680 test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec); 732 ret = test_hash_jiffies(&desc, tvmem, speed[i].blen,
733 speed[i].plen, output, sec);
681 else 734 else
682 test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output); 735 ret = test_hash_cycles(&desc, tvmem, speed[i].blen,
736 speed[i].plen, output);
737
738 if (ret) {
739 printk("hashing failed ret=%d\n", ret);
740 break;
741 }
683 } 742 }
684 743
685out: 744out:
686 crypto_free_tfm(tfm); 745 crypto_free_hash(tfm);
687} 746}
688 747
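
test_hash_speed() follows the same shape as the cipher conversion: crypto_alloc_hash() takes the (type, mask) pair, a struct hash_desc binds the tfm to per-call flags, and sizes are queried through crypto_hash_digestsize(). The setup pattern, sketched:

    struct crypto_hash *tfm;
    struct hash_desc desc;

    tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    desc.tfm = tfm;
    desc.flags = 0;

    /* ... crypto_hash_digest(&desc, sg, len, out) and friends ... */

    crypto_free_hash(tfm);
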
689static void test_deflate(void) 748static void test_deflate(void)
690{ 749{
691 unsigned int i; 750 unsigned int i;
692 char result[COMP_BUF_SIZE]; 751 char result[COMP_BUF_SIZE];
693 struct crypto_tfm *tfm; 752 struct crypto_comp *tfm;
694 struct comp_testvec *tv; 753 struct comp_testvec *tv;
695 unsigned int tsize; 754 unsigned int tsize;
696 755
@@ -762,105 +821,7 @@ static void test_deflate(void)
762 ilen, dlen); 821 ilen, dlen);
763 } 822 }
764out: 823out:
765 crypto_free_tfm(tfm); 824 crypto_free_comp(tfm);
766}
767
768static void test_crc32c(void)
769{
770#define NUMVEC 6
771#define VECSIZE 40
772
773 int i, j, pass;
774 u32 crc;
775 u8 b, test_vec[NUMVEC][VECSIZE];
776 static u32 vec_results[NUMVEC] = {
777 0x0e2c157f, 0xe980ebf6, 0xde74bded,
778 0xd579c862, 0xba979ad0, 0x2b29d913
779 };
780 static u32 tot_vec_results = 0x24c5d375;
781
782 struct scatterlist sg[NUMVEC];
783 struct crypto_tfm *tfm;
784 char *fmtdata = "testing crc32c initialized to %08x: %s\n";
785#define SEEDTESTVAL 0xedcba987
786 u32 seed;
787
788 printk("\ntesting crc32c\n");
789
790 tfm = crypto_alloc_tfm("crc32c", 0);
791 if (tfm == NULL) {
792 printk("failed to load transform for crc32c\n");
793 return;
794 }
795
796 crypto_digest_init(tfm);
797 crypto_digest_final(tfm, (u8*)&crc);
798 printk(fmtdata, crc, (crc == 0) ? "pass" : "ERROR");
799
800 /*
801 * stuff test_vec with known values, simple incrementing
802 * byte values.
803 */
804 b = 0;
805 for (i = 0; i < NUMVEC; i++) {
806 for (j = 0; j < VECSIZE; j++)
807 test_vec[i][j] = ++b;
808 sg_set_buf(&sg[i], test_vec[i], VECSIZE);
809 }
810
811 seed = SEEDTESTVAL;
812 (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32));
813 crypto_digest_final(tfm, (u8*)&crc);
814 printk("testing crc32c setkey returns %08x : %s\n", crc, (crc == (SEEDTESTVAL ^ ~(u32)0)) ?
815 "pass" : "ERROR");
816
817 printk("testing crc32c using update/final:\n");
818
819 pass = 1; /* assume all is well */
820
821 for (i = 0; i < NUMVEC; i++) {
822 seed = ~(u32)0;
823 (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32));
824 crypto_digest_update(tfm, &sg[i], 1);
825 crypto_digest_final(tfm, (u8*)&crc);
826 if (crc == vec_results[i]) {
827 printk(" %08x:OK", crc);
828 } else {
829 printk(" %08x:BAD, wanted %08x\n", crc, vec_results[i]);
830 pass = 0;
831 }
832 }
833
834 printk("\ntesting crc32c using incremental accumulator:\n");
835 crc = 0;
836 for (i = 0; i < NUMVEC; i++) {
837 seed = (crc ^ ~(u32)0);
838 (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32));
839 crypto_digest_update(tfm, &sg[i], 1);
840 crypto_digest_final(tfm, (u8*)&crc);
841 }
842 if (crc == tot_vec_results) {
843 printk(" %08x:OK", crc);
844 } else {
845 printk(" %08x:BAD, wanted %08x\n", crc, tot_vec_results);
846 pass = 0;
847 }
848
849 printk("\ntesting crc32c using digest:\n");
850 seed = ~(u32)0;
851 (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32));
852 crypto_digest_digest(tfm, sg, NUMVEC, (u8*)&crc);
853 if (crc == tot_vec_results) {
854 printk(" %08x:OK", crc);
855 } else {
856 printk(" %08x:BAD, wanted %08x\n", crc, tot_vec_results);
857 pass = 0;
858 }
859
860 printk("\n%s\n", pass ? "pass" : "ERROR");
861
862 crypto_free_tfm(tfm);
863 printk("crc32c test complete\n");
864} 825}
865 826
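
The hand-rolled test_crc32c() above is replaced by data: crc32c now runs through the generic test_hash() path against crc32c_tv_template (added to tcrypt.h further down). The seed the old code fed through crypto_digest_setkey() survives as the vectors' key field: crc32c takes a 4-byte little-endian "key" as its starting remainder, {0xff,0xff,0xff,0xff} being the conventional ~0 seed, and the digest is the final CRC in little-endian byte order. An equivalent seeded one-shot under the new hash API would look roughly like:

    u32 crc;
    static const u8 seed[4] = { 0xff, 0xff, 0xff, 0xff };

    crypto_hash_setkey(tfm, seed, 4);
    sg_init_one(sg, data, len);
    crypto_hash_digest(&desc, sg, len, (u8 *)&crc);
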
866static void test_available(void) 827static void test_available(void)
@@ -869,8 +830,8 @@ static void test_available(void)
869 830
870 while (*name) { 831 while (*name) {
871 printk("alg %s ", *name); 832 printk("alg %s ", *name);
872 printk((crypto_alg_available(*name, 0)) ? 833 printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ?
873 "found\n" : "not found\n"); 834 "found\n" : "not found\n");
874 name++; 835 name++;
875 } 836 }
876} 837}
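
The availability probe changes from crypto_alg_available(name, flags) to crypto_has_alg(name, type, mask), which takes the same (type, mask) pair as the allocators, so a presence check can be constrained exactly like the allocation it stands in for:

    /* e.g. "is there a synchronous implementation of sha1?" */
    if (crypto_has_alg("sha1", 0, CRYPTO_ALG_ASYNC))
            printk("sha1: found\n");
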
@@ -885,79 +846,119 @@ static void do_test(void)
885 test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS); 846 test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS);
886 847
887 //DES 848 //DES
888 test_cipher ("des", MODE_ECB, ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); 849 test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template,
889 test_cipher ("des", MODE_ECB, DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); 850 DES_ENC_TEST_VECTORS);
890 test_cipher ("des", MODE_CBC, ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); 851 test_cipher("ecb(des)", DECRYPT, des_dec_tv_template,
891 test_cipher ("des", MODE_CBC, DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); 852 DES_DEC_TEST_VECTORS);
853 test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template,
854 DES_CBC_ENC_TEST_VECTORS);
855 test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template,
856 DES_CBC_DEC_TEST_VECTORS);
892 857
893 //DES3_EDE 858 //DES3_EDE
894 test_cipher ("des3_ede", MODE_ECB, ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); 859 test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template,
895 test_cipher ("des3_ede", MODE_ECB, DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); 860 DES3_EDE_ENC_TEST_VECTORS);
861 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
862 DES3_EDE_DEC_TEST_VECTORS);
896 863
897 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); 864 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
898 865
899 test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); 866 test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS);
900 867
901 //BLOWFISH 868 //BLOWFISH
902 test_cipher ("blowfish", MODE_ECB, ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); 869 test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template,
903 test_cipher ("blowfish", MODE_ECB, DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); 870 BF_ENC_TEST_VECTORS);
904 test_cipher ("blowfish", MODE_CBC, ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); 871 test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template,
905 test_cipher ("blowfish", MODE_CBC, DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); 872 BF_DEC_TEST_VECTORS);
873 test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template,
874 BF_CBC_ENC_TEST_VECTORS);
875 test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template,
876 BF_CBC_DEC_TEST_VECTORS);
906 877
907 //TWOFISH 878 //TWOFISH
908 test_cipher ("twofish", MODE_ECB, ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); 879 test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template,
909 test_cipher ("twofish", MODE_ECB, DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); 880 TF_ENC_TEST_VECTORS);
910 test_cipher ("twofish", MODE_CBC, ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); 881 test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template,
911 test_cipher ("twofish", MODE_CBC, DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); 882 TF_DEC_TEST_VECTORS);
883 test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template,
884 TF_CBC_ENC_TEST_VECTORS);
885 test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template,
886 TF_CBC_DEC_TEST_VECTORS);
912 887
913 //SERPENT 888 //SERPENT
914 test_cipher ("serpent", MODE_ECB, ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); 889 test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template,
915 test_cipher ("serpent", MODE_ECB, DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); 890 SERPENT_ENC_TEST_VECTORS);
891 test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template,
892 SERPENT_DEC_TEST_VECTORS);
916 893
917 //TNEPRES 894 //TNEPRES
918 test_cipher ("tnepres", MODE_ECB, ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); 895 test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template,
919 test_cipher ("tnepres", MODE_ECB, DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); 896 TNEPRES_ENC_TEST_VECTORS);
897 test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template,
898 TNEPRES_DEC_TEST_VECTORS);
920 899
921 //AES 900 //AES
922 test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); 901 test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template,
923 test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); 902 AES_ENC_TEST_VECTORS);
924 test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); 903 test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template,
925 test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); 904 AES_DEC_TEST_VECTORS);
905 test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template,
906 AES_CBC_ENC_TEST_VECTORS);
907 test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
908 AES_CBC_DEC_TEST_VECTORS);
926 909
927 //CAST5 910 //CAST5
928 test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); 911 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
929 test_cipher ("cast5", MODE_ECB, DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); 912 CAST5_ENC_TEST_VECTORS);
913 test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template,
914 CAST5_DEC_TEST_VECTORS);
930 915
931 //CAST6 916 //CAST6
932 test_cipher ("cast6", MODE_ECB, ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); 917 test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template,
933 test_cipher ("cast6", MODE_ECB, DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); 918 CAST6_ENC_TEST_VECTORS);
919 test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template,
920 CAST6_DEC_TEST_VECTORS);
934 921
935 //ARC4 922 //ARC4
936 test_cipher ("arc4", MODE_ECB, ENCRYPT, arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); 923 test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template,
937 test_cipher ("arc4", MODE_ECB, DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); 924 ARC4_ENC_TEST_VECTORS);
925 test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template,
926 ARC4_DEC_TEST_VECTORS);
938 927
939 //TEA 928 //TEA
940 test_cipher ("tea", MODE_ECB, ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); 929 test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template,
941 test_cipher ("tea", MODE_ECB, DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); 930 TEA_ENC_TEST_VECTORS);
931 test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template,
932 TEA_DEC_TEST_VECTORS);
942 933
943 934
944 //XTEA 935 //XTEA
945 test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); 936 test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template,
946 test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); 937 XTEA_ENC_TEST_VECTORS);
938 test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template,
939 XTEA_DEC_TEST_VECTORS);
947 940
948 //KHAZAD 941 //KHAZAD
949 test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); 942 test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template,
950 test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); 943 KHAZAD_ENC_TEST_VECTORS);
944 test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template,
945 KHAZAD_DEC_TEST_VECTORS);
951 946
952 //ANUBIS 947 //ANUBIS
953 test_cipher ("anubis", MODE_ECB, ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); 948 test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template,
954 test_cipher ("anubis", MODE_ECB, DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); 949 ANUBIS_ENC_TEST_VECTORS);
955 test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 950 test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template,
956 test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 951 ANUBIS_DEC_TEST_VECTORS);
952 test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template,
953 ANUBIS_CBC_ENC_TEST_VECTORS);
954 test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template,
955 ANUBIS_CBC_ENC_TEST_VECTORS);
957 956
958 //XETA 957 //XETA
959 test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); 958 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
960 test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); 959 XETA_ENC_TEST_VECTORS);
960 test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
961 XETA_DEC_TEST_VECTORS);
961 962
962 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); 963 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
963 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); 964 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
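
Throughout this hunk the (algo, mode) pairs collapse into a single template-instantiation name of the form "mode(cipher)". The crypto manager resolves a name like "cbc(aes)" at lookup time by wrapping the cbc template (new in this series) around the aes cipher, so test_cipher() no longer needs a mode argument at all. Side by side:

    /* old: mode chosen by a tfm flag at allocation */
    tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);

    /* new: mode is part of the algorithm name; the template is
     * instantiated on first use */
    tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
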
@@ -968,12 +969,13 @@ static void do_test(void)
968 test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS); 969 test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS);
969 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); 970 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
970 test_deflate(); 971 test_deflate();
971 test_crc32c(); 972 test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS);
972#ifdef CONFIG_CRYPTO_HMAC 973 test_hash("hmac(md5)", hmac_md5_tv_template,
973 test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); 974 HMAC_MD5_TEST_VECTORS);
974 test_hmac("sha1", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); 975 test_hash("hmac(sha1)", hmac_sha1_tv_template,
975 test_hmac("sha256", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); 976 HMAC_SHA1_TEST_VECTORS);
976#endif 977 test_hash("hmac(sha256)", hmac_sha256_tv_template,
978 HMAC_SHA256_TEST_VECTORS);
977 979
978 test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS); 980 test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS);
979 break; 981 break;
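
HMAC likewise stops being a CONFIG_CRYPTO_HMAC compile-time wrapper with its own test_hmac() entry point; as a template, "hmac(sha1)" is just another keyed hash, so the generic test_hash() path covers it. A keyed run differs from a plain one only by a setkey, sketched here with assumed key/data buffers:

    tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    desc.tfm = tfm;
    desc.flags = 0;

    ret = crypto_hash_setkey(tfm, key, keylen);
    if (!ret)
            ret = crypto_hash_digest(&desc, sg, len, out);

    crypto_free_hash(tfm);
    return ret;
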
@@ -987,15 +989,21 @@ static void do_test(void)
987 break; 989 break;
988 990
989 case 3: 991 case 3:
990 test_cipher ("des", MODE_ECB, ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); 992 test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template,
991 test_cipher ("des", MODE_ECB, DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); 993 DES_ENC_TEST_VECTORS);
992 test_cipher ("des", MODE_CBC, ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); 994 test_cipher("ecb(des)", DECRYPT, des_dec_tv_template,
993 test_cipher ("des", MODE_CBC, DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); 995 DES_DEC_TEST_VECTORS);
996 test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template,
997 DES_CBC_ENC_TEST_VECTORS);
998 test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template,
999 DES_CBC_DEC_TEST_VECTORS);
994 break; 1000 break;
995 1001
996 case 4: 1002 case 4:
997 test_cipher ("des3_ede", MODE_ECB, ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); 1003 test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template,
998 test_cipher ("des3_ede", MODE_ECB, DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); 1004 DES3_EDE_ENC_TEST_VECTORS);
1005 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1006 DES3_EDE_DEC_TEST_VECTORS);
999 break; 1007 break;
1000 1008
1001 case 5: 1009 case 5:
@@ -1007,29 +1015,43 @@ static void do_test(void)
1007 break; 1015 break;
1008 1016
1009 case 7: 1017 case 7:
1010 test_cipher ("blowfish", MODE_ECB, ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); 1018 test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template,
1011 test_cipher ("blowfish", MODE_ECB, DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); 1019 BF_ENC_TEST_VECTORS);
1012 test_cipher ("blowfish", MODE_CBC, ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); 1020 test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template,
1013 test_cipher ("blowfish", MODE_CBC, DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); 1021 BF_DEC_TEST_VECTORS);
1022 test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template,
1023 BF_CBC_ENC_TEST_VECTORS);
1024 test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template,
1025 BF_CBC_DEC_TEST_VECTORS);
1014 break; 1026 break;
1015 1027
1016 case 8: 1028 case 8:
1017 test_cipher ("twofish", MODE_ECB, ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); 1029 test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template,
1018 test_cipher ("twofish", MODE_ECB, DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); 1030 TF_ENC_TEST_VECTORS);
1019 test_cipher ("twofish", MODE_CBC, ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); 1031 test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template,
1020 test_cipher ("twofish", MODE_CBC, DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); 1032 TF_DEC_TEST_VECTORS);
1033 test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template,
1034 TF_CBC_ENC_TEST_VECTORS);
1035 test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template,
1036 TF_CBC_DEC_TEST_VECTORS);
1021 break; 1037 break;
1022 1038
1023 case 9: 1039 case 9:
1024 test_cipher ("serpent", MODE_ECB, ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); 1040 test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template,
1025 test_cipher ("serpent", MODE_ECB, DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); 1041 SERPENT_ENC_TEST_VECTORS);
1042 test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template,
1043 SERPENT_DEC_TEST_VECTORS);
1026 break; 1044 break;
1027 1045
1028 case 10: 1046 case 10:
1029 test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); 1047 test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template,
1030 test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); 1048 AES_ENC_TEST_VECTORS);
1031 test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); 1049 test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template,
1032 test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); 1050 AES_DEC_TEST_VECTORS);
1051 test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template,
1052 AES_CBC_ENC_TEST_VECTORS);
1053 test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
1054 AES_CBC_DEC_TEST_VECTORS);
1033 break; 1055 break;
1034 1056
1035 case 11: 1057 case 11:
@@ -1045,18 +1067,24 @@ static void do_test(void)
1045 break; 1067 break;
1046 1068
1047 case 14: 1069 case 14:
1048 test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); 1070 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
1049 test_cipher ("cast5", MODE_ECB, DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); 1071 CAST5_ENC_TEST_VECTORS);
1072 test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template,
1073 CAST5_DEC_TEST_VECTORS);
1050 break; 1074 break;
1051 1075
1052 case 15: 1076 case 15:
1053 test_cipher ("cast6", MODE_ECB, ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); 1077 test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template,
1054 test_cipher ("cast6", MODE_ECB, DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); 1078 CAST6_ENC_TEST_VECTORS);
1079 test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template,
1080 CAST6_DEC_TEST_VECTORS);
1055 break; 1081 break;
1056 1082
1057 case 16: 1083 case 16:
1058 test_cipher ("arc4", MODE_ECB, ENCRYPT, arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); 1084 test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template,
1059 test_cipher ("arc4", MODE_ECB, DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); 1085 ARC4_ENC_TEST_VECTORS);
1086 test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template,
1087 ARC4_DEC_TEST_VECTORS);
1060 break; 1088 break;
1061 1089
1062 case 17: 1090 case 17:
@@ -1064,22 +1092,28 @@ static void do_test(void)
1064 break; 1092 break;
1065 1093
1066 case 18: 1094 case 18:
1067 test_crc32c(); 1095 test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS);
1068 break; 1096 break;
1069 1097
1070 case 19: 1098 case 19:
1071 test_cipher ("tea", MODE_ECB, ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); 1099 test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template,
1072 test_cipher ("tea", MODE_ECB, DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); 1100 TEA_ENC_TEST_VECTORS);
1101 test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template,
1102 TEA_DEC_TEST_VECTORS);
1073 break; 1103 break;
1074 1104
1075 case 20: 1105 case 20:
1076 test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); 1106 test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template,
1077 test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); 1107 XTEA_ENC_TEST_VECTORS);
1108 test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template,
1109 XTEA_DEC_TEST_VECTORS);
1078 break; 1110 break;
1079 1111
1080 case 21: 1112 case 21:
1081 test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); 1113 test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template,
1082 test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); 1114 KHAZAD_ENC_TEST_VECTORS);
1115 test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template,
1116 KHAZAD_DEC_TEST_VECTORS);
1083 break; 1117 break;
1084 1118
1085 case 22: 1119 case 22:
@@ -1095,15 +1129,21 @@ static void do_test(void)
1095 break; 1129 break;
1096 1130
1097 case 25: 1131 case 25:
1098 test_cipher ("tnepres", MODE_ECB, ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); 1132 test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template,
1099 test_cipher ("tnepres", MODE_ECB, DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); 1133 TNEPRES_ENC_TEST_VECTORS);
1134 test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template,
1135 TNEPRES_DEC_TEST_VECTORS);
1100 break; 1136 break;
1101 1137
1102 case 26: 1138 case 26:
1103 test_cipher ("anubis", MODE_ECB, ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); 1139 test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template,
1104 test_cipher ("anubis", MODE_ECB, DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); 1140 ANUBIS_ENC_TEST_VECTORS);
1105 test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 1141 test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template,
1106 test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 1142 ANUBIS_DEC_TEST_VECTORS);
1143 test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template,
1144 ANUBIS_CBC_ENC_TEST_VECTORS);
1145 test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template,
1146 ANUBIS_CBC_ENC_TEST_VECTORS);
1107 break; 1147 break;
1108 1148
1109 case 27: 1149 case 27:
@@ -1120,85 +1160,88 @@ static void do_test(void)
1120 break; 1160 break;
1121 1161
1122 case 30: 1162 case 30:
1123 test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); 1163 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
1124 test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); 1164 XETA_ENC_TEST_VECTORS);
1165 test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
1166 XETA_DEC_TEST_VECTORS);
1125 break; 1167 break;
1126 1168
1127#ifdef CONFIG_CRYPTO_HMAC
1128 case 100: 1169 case 100:
1129 test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); 1170 test_hash("hmac(md5)", hmac_md5_tv_template,
1171 HMAC_MD5_TEST_VECTORS);
1130 break; 1172 break;
1131 1173
1132 case 101: 1174 case 101:
1133 test_hmac("sha1", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); 1175 test_hash("hmac(sha1)", hmac_sha1_tv_template,
1176 HMAC_SHA1_TEST_VECTORS);
1134 break; 1177 break;
1135 1178
1136 case 102: 1179 case 102:
1137 test_hmac("sha256", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); 1180 test_hash("hmac(sha256)", hmac_sha256_tv_template,
1181 HMAC_SHA256_TEST_VECTORS);
1138 break; 1182 break;
1139 1183
1140#endif
1141 1184
1142 case 200: 1185 case 200:
1143 test_cipher_speed("aes", MODE_ECB, ENCRYPT, sec, NULL, 0, 1186 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
1144 aes_speed_template); 1187 aes_speed_template);
1145 test_cipher_speed("aes", MODE_ECB, DECRYPT, sec, NULL, 0, 1188 test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
1146 aes_speed_template); 1189 aes_speed_template);
1147 test_cipher_speed("aes", MODE_CBC, ENCRYPT, sec, NULL, 0, 1190 test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
1148 aes_speed_template); 1191 aes_speed_template);
1149 test_cipher_speed("aes", MODE_CBC, DECRYPT, sec, NULL, 0, 1192 test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
1150 aes_speed_template); 1193 aes_speed_template);
1151 break; 1194 break;
1152 1195
1153 case 201: 1196 case 201:
1154 test_cipher_speed("des3_ede", MODE_ECB, ENCRYPT, sec, 1197 test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec,
1155 des3_ede_enc_tv_template, 1198 des3_ede_enc_tv_template,
1156 DES3_EDE_ENC_TEST_VECTORS, 1199 DES3_EDE_ENC_TEST_VECTORS,
1157 des3_ede_speed_template); 1200 des3_ede_speed_template);
1158 test_cipher_speed("des3_ede", MODE_ECB, DECRYPT, sec, 1201 test_cipher_speed("ecb(des3_ede)", DECRYPT, sec,
1159 des3_ede_dec_tv_template, 1202 des3_ede_dec_tv_template,
1160 DES3_EDE_DEC_TEST_VECTORS, 1203 DES3_EDE_DEC_TEST_VECTORS,
1161 des3_ede_speed_template); 1204 des3_ede_speed_template);
1162 test_cipher_speed("des3_ede", MODE_CBC, ENCRYPT, sec, 1205 test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec,
1163 des3_ede_enc_tv_template, 1206 des3_ede_enc_tv_template,
1164 DES3_EDE_ENC_TEST_VECTORS, 1207 DES3_EDE_ENC_TEST_VECTORS,
1165 des3_ede_speed_template); 1208 des3_ede_speed_template);
1166 test_cipher_speed("des3_ede", MODE_CBC, DECRYPT, sec, 1209 test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
1167 des3_ede_dec_tv_template, 1210 des3_ede_dec_tv_template,
1168 DES3_EDE_DEC_TEST_VECTORS, 1211 DES3_EDE_DEC_TEST_VECTORS,
1169 des3_ede_speed_template); 1212 des3_ede_speed_template);
1170 break; 1213 break;
1171 1214
1172 case 202: 1215 case 202:
1173 test_cipher_speed("twofish", MODE_ECB, ENCRYPT, sec, NULL, 0, 1216 test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
1174 twofish_speed_template); 1217 twofish_speed_template);
1175 test_cipher_speed("twofish", MODE_ECB, DECRYPT, sec, NULL, 0, 1218 test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
1176 twofish_speed_template); 1219 twofish_speed_template);
1177 test_cipher_speed("twofish", MODE_CBC, ENCRYPT, sec, NULL, 0, 1220 test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
1178 twofish_speed_template); 1221 twofish_speed_template);
1179 test_cipher_speed("twofish", MODE_CBC, DECRYPT, sec, NULL, 0, 1222 test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
1180 twofish_speed_template); 1223 twofish_speed_template);
1181 break; 1224 break;
1182 1225
1183 case 203: 1226 case 203:
1184 test_cipher_speed("blowfish", MODE_ECB, ENCRYPT, sec, NULL, 0, 1227 test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
1185 blowfish_speed_template); 1228 blowfish_speed_template);
1186 test_cipher_speed("blowfish", MODE_ECB, DECRYPT, sec, NULL, 0, 1229 test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
1187 blowfish_speed_template); 1230 blowfish_speed_template);
1188 test_cipher_speed("blowfish", MODE_CBC, ENCRYPT, sec, NULL, 0, 1231 test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
1189 blowfish_speed_template); 1232 blowfish_speed_template);
1190 test_cipher_speed("blowfish", MODE_CBC, DECRYPT, sec, NULL, 0, 1233 test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
1191 blowfish_speed_template); 1234 blowfish_speed_template);
1192 break; 1235 break;
1193 1236
1194 case 204: 1237 case 204:
1195 test_cipher_speed("des", MODE_ECB, ENCRYPT, sec, NULL, 0, 1238 test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
1196 des_speed_template); 1239 des_speed_template);
1197 test_cipher_speed("des", MODE_ECB, DECRYPT, sec, NULL, 0, 1240 test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
1198 des_speed_template); 1241 des_speed_template);
1199 test_cipher_speed("des", MODE_CBC, ENCRYPT, sec, NULL, 0, 1242 test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
1200 des_speed_template); 1243 des_speed_template);
1201 test_cipher_speed("des", MODE_CBC, DECRYPT, sec, NULL, 0, 1244 test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
1202 des_speed_template); 1245 des_speed_template);
1203 break; 1246 break;
1204 1247
@@ -1206,51 +1249,51 @@ static void do_test(void)
1206 /* fall through */ 1249 /* fall through */
1207 1250
1208 case 301: 1251 case 301:
1209 test_digest_speed("md4", sec, generic_digest_speed_template); 1252 test_hash_speed("md4", sec, generic_hash_speed_template);
1210 if (mode > 300 && mode < 400) break; 1253 if (mode > 300 && mode < 400) break;
1211 1254
1212 case 302: 1255 case 302:
1213 test_digest_speed("md5", sec, generic_digest_speed_template); 1256 test_hash_speed("md5", sec, generic_hash_speed_template);
1214 if (mode > 300 && mode < 400) break; 1257 if (mode > 300 && mode < 400) break;
1215 1258
1216 case 303: 1259 case 303:
1217 test_digest_speed("sha1", sec, generic_digest_speed_template); 1260 test_hash_speed("sha1", sec, generic_hash_speed_template);
1218 if (mode > 300 && mode < 400) break; 1261 if (mode > 300 && mode < 400) break;
1219 1262
1220 case 304: 1263 case 304:
1221 test_digest_speed("sha256", sec, generic_digest_speed_template); 1264 test_hash_speed("sha256", sec, generic_hash_speed_template);
1222 if (mode > 300 && mode < 400) break; 1265 if (mode > 300 && mode < 400) break;
1223 1266
1224 case 305: 1267 case 305:
1225 test_digest_speed("sha384", sec, generic_digest_speed_template); 1268 test_hash_speed("sha384", sec, generic_hash_speed_template);
1226 if (mode > 300 && mode < 400) break; 1269 if (mode > 300 && mode < 400) break;
1227 1270
1228 case 306: 1271 case 306:
1229 test_digest_speed("sha512", sec, generic_digest_speed_template); 1272 test_hash_speed("sha512", sec, generic_hash_speed_template);
1230 if (mode > 300 && mode < 400) break; 1273 if (mode > 300 && mode < 400) break;
1231 1274
1232 case 307: 1275 case 307:
1233 test_digest_speed("wp256", sec, generic_digest_speed_template); 1276 test_hash_speed("wp256", sec, generic_hash_speed_template);
1234 if (mode > 300 && mode < 400) break; 1277 if (mode > 300 && mode < 400) break;
1235 1278
1236 case 308: 1279 case 308:
1237 test_digest_speed("wp384", sec, generic_digest_speed_template); 1280 test_hash_speed("wp384", sec, generic_hash_speed_template);
1238 if (mode > 300 && mode < 400) break; 1281 if (mode > 300 && mode < 400) break;
1239 1282
1240 case 309: 1283 case 309:
1241 test_digest_speed("wp512", sec, generic_digest_speed_template); 1284 test_hash_speed("wp512", sec, generic_hash_speed_template);
1242 if (mode > 300 && mode < 400) break; 1285 if (mode > 300 && mode < 400) break;
1243 1286
1244 case 310: 1287 case 310:
1245 test_digest_speed("tgr128", sec, generic_digest_speed_template); 1288 test_hash_speed("tgr128", sec, generic_hash_speed_template);
1246 if (mode > 300 && mode < 400) break; 1289 if (mode > 300 && mode < 400) break;
1247 1290
1248 case 311: 1291 case 311:
1249 test_digest_speed("tgr160", sec, generic_digest_speed_template); 1292 test_hash_speed("tgr160", sec, generic_hash_speed_template);
1250 if (mode > 300 && mode < 400) break; 1293 if (mode > 300 && mode < 400) break;
1251 1294
1252 case 312: 1295 case 312:
1253 test_digest_speed("tgr192", sec, generic_digest_speed_template); 1296 test_hash_speed("tgr192", sec, generic_hash_speed_template);
1254 if (mode > 300 && mode < 400) break; 1297 if (mode > 300 && mode < 400) break;
1255 1298
1256 case 399: 1299 case 399:
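
The speed cases keep their deliberate fall-through: the "if (mode > 300 && mode < 400) break;" guard means a specific mode such as 303 stops after its own test, while mode 300 falls through the whole sequence. Only the names change here, test_digest_speed() becoming test_hash_speed() and generic_digest_speed_template becoming generic_hash_speed_template. (Assuming tcrypt's usual module parameters, a single timed run would be started with something like "modprobe tcrypt mode=303 sec=10".)
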
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 1fac5602f633..a40c4411729e 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -28,7 +28,7 @@
28struct hash_testvec { 28struct hash_testvec {
29 /* only used with keyed hash algorithms */ 29 /* only used with keyed hash algorithms */
30 char key[128] __attribute__ ((__aligned__(4))); 30 char key[128] __attribute__ ((__aligned__(4)));
31 char plaintext[128]; 31 char plaintext[240];
32 char digest[MAX_DIGEST_SIZE]; 32 char digest[MAX_DIGEST_SIZE];
33 unsigned char tap[MAX_TAP]; 33 unsigned char tap[MAX_TAP];
34 unsigned char psize; 34 unsigned char psize;
@@ -36,16 +36,6 @@ struct hash_testvec {
36 unsigned char ksize; 36 unsigned char ksize;
37}; 37};
38 38
39struct hmac_testvec {
40 char key[128];
41 char plaintext[128];
42 char digest[MAX_DIGEST_SIZE];
43 unsigned char tap[MAX_TAP];
44 unsigned char ksize;
45 unsigned char psize;
46 unsigned char np;
47};
48
49struct cipher_testvec { 39struct cipher_testvec {
50 char key[MAX_KEYLEN] __attribute__ ((__aligned__(4))); 40 char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
51 char iv[MAX_IVLEN]; 41 char iv[MAX_IVLEN];
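
With plaintext widened from 128 to 240 bytes (large enough for the chunked crc32c vector added below) and key/ksize already present, struct hash_testvec subsumes the old hmac_testvec, so the HMAC tables can simply be re-typed. For instance, the first RFC 2202 HMAC-SHA1 case fits the common struct as:

    static struct hash_testvec example_hmac_tv[] = {
            {
                    .key    = { [0 ... 19] = 0x0b },
                    .ksize  = 20,
                    .plaintext = "Hi There",
                    .psize  = 8,
                    .digest = { 0xb6, 0x17, 0x31, 0x86, 0x55, 0x05,
                                0x72, 0x64, 0xe2, 0x8b, 0xc0, 0xb6,
                                0xfb, 0x37, 0x8c, 0x8e, 0xf1, 0x46,
                                0xbe, 0x00 },
            },
    };
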
@@ -65,7 +55,7 @@ struct cipher_speed {
65 unsigned int blen; 55 unsigned int blen;
66}; 56};
67 57
68struct digest_speed { 58struct hash_speed {
69 unsigned int blen; /* buffer length */ 59 unsigned int blen; /* buffer length */
70 unsigned int plen; /* per-update length */ 60 unsigned int plen; /* per-update length */
71}; 61};
@@ -697,14 +687,13 @@ static struct hash_testvec tgr128_tv_template[] = {
697 }, 687 },
698}; 688};
699 689
700#ifdef CONFIG_CRYPTO_HMAC
701/* 690/*
702 * HMAC-MD5 test vectors from RFC2202 691 * HMAC-MD5 test vectors from RFC2202
703 * (These need to be fixed to not use strlen). 692 * (These need to be fixed to not use strlen).
704 */ 693 */
705#define HMAC_MD5_TEST_VECTORS 7 694#define HMAC_MD5_TEST_VECTORS 7
706 695
707static struct hmac_testvec hmac_md5_tv_template[] = 696static struct hash_testvec hmac_md5_tv_template[] =
708{ 697{
709 { 698 {
710 .key = { [0 ... 15] = 0x0b }, 699 .key = { [0 ... 15] = 0x0b },
@@ -768,7 +757,7 @@ static struct hmac_testvec hmac_md5_tv_template[] =
768 */ 757 */
769#define HMAC_SHA1_TEST_VECTORS 7 758#define HMAC_SHA1_TEST_VECTORS 7
770 759
771static struct hmac_testvec hmac_sha1_tv_template[] = { 760static struct hash_testvec hmac_sha1_tv_template[] = {
772 { 761 {
773 .key = { [0 ... 19] = 0x0b }, 762 .key = { [0 ... 19] = 0x0b },
774 .ksize = 20, 763 .ksize = 20,
@@ -833,7 +822,7 @@ static struct hmac_testvec hmac_sha1_tv_template[] = {
833 */ 822 */
834#define HMAC_SHA256_TEST_VECTORS 10 823#define HMAC_SHA256_TEST_VECTORS 10
835 824
836static struct hmac_testvec hmac_sha256_tv_template[] = { 825static struct hash_testvec hmac_sha256_tv_template[] = {
837 { 826 {
838 .key = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 827 .key = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
839 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 828 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
@@ -944,8 +933,6 @@ static struct hmac_testvec hmac_sha256_tv_template[] = {
944 }, 933 },
945}; 934};
946 935
947#endif /* CONFIG_CRYPTO_HMAC */
948
949/* 936/*
950 * DES test vectors. 937 * DES test vectors.
951 */ 938 */
@@ -2897,6 +2884,183 @@ static struct hash_testvec michael_mic_tv_template[] = {
2897}; 2884};
2898 2885
2899/* 2886/*
2887 * CRC32C test vectors
2888 */
2889#define CRC32C_TEST_VECTORS 14
2890
2891static struct hash_testvec crc32c_tv_template[] = {
2892 {
2893 .psize = 0,
2894 .digest = { 0x00, 0x00, 0x00, 0x00 }
2895 },
2896 {
2897 .key = { 0x87, 0xa9, 0xcb, 0xed },
2898 .ksize = 4,
2899 .psize = 0,
2900 .digest = { 0x78, 0x56, 0x34, 0x12 },
2901 },
2902 {
2903 .key = { 0xff, 0xff, 0xff, 0xff },
2904 .ksize = 4,
2905 .plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
2906 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
2907 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
2908 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
2909 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28 },
2910 .psize = 40,
2911 .digest = { 0x7f, 0x15, 0x2c, 0x0e }
2912 },
2913 {
2914 .key = { 0xff, 0xff, 0xff, 0xff },
2915 .ksize = 4,
2916 .plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
2917 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
2918 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
2919 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
2920 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 },
2921 .psize = 40,
2922 .digest = { 0xf6, 0xeb, 0x80, 0xe9 }
2923 },
2924 {
2925 .key = { 0xff, 0xff, 0xff, 0xff },
2926 .ksize = 4,
2927 .plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
2928 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
2929 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
2930 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
2931 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 },
2932 .psize = 40,
2933 .digest = { 0xed, 0xbd, 0x74, 0xde }
2934 },
2935 {
2936 .key = { 0xff, 0xff, 0xff, 0xff },
2937 .ksize = 4,
2938 .plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80,
2939 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
2940 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
2941 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
2942 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 },
2943 .psize = 40,
2944 .digest = { 0x62, 0xc8, 0x79, 0xd5 }
2945 },
2946 {
2947 .key = { 0xff, 0xff, 0xff, 0xff },
2948 .ksize = 4,
2949 .plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
2950 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
2951 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
2952 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
2953 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 },
2954 .psize = 40,
2955 .digest = { 0xd0, 0x9a, 0x97, 0xba }
2956 },
2957 {
2958 .key = { 0xff, 0xff, 0xff, 0xff },
2959 .ksize = 4,
2960 .plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0,
2961 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
2962 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0,
2963 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8,
2964 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 },
2965 .psize = 40,
2966 .digest = { 0x13, 0xd9, 0x29, 0x2b }
2967 },
2968 {
2969 .key = { 0x80, 0xea, 0xd3, 0xf1 },
2970 .ksize = 4,
2971 .plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
2972 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
2973 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
2974 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
2975 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 },
2976 .psize = 40,
2977 .digest = { 0x0c, 0xb5, 0xe2, 0xa2 }
2978 },
2979 {
2980 .key = { 0xf3, 0x4a, 0x1d, 0x5d },
2981 .ksize = 4,
2982 .plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
2983 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
2984 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
2985 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
2986 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 },
2987 .psize = 40,
2988 .digest = { 0xd1, 0x7f, 0xfb, 0xa6 }
2989 },
2990 {
2991 .key = { 0x2e, 0x80, 0x04, 0x59 },
2992 .ksize = 4,
2993 .plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80,
2994 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
2995 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
2996 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
2997 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 },
2998 .psize = 40,
2999 .digest = { 0x59, 0x33, 0xe6, 0x7a }
3000 },
3001 {
3002 .key = { 0xa6, 0xcc, 0x19, 0x85 },
3003 .ksize = 4,
3004 .plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
3005 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
3006 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
3007 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
3008 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 },
3009 .psize = 40,
3010 .digest = { 0xbe, 0x03, 0x01, 0xd2 }
3011 },
3012 {
3013 .key = { 0x41, 0xfc, 0xfe, 0x2d },
3014 .ksize = 4,
3015 .plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0,
3016 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
3017 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0,
3018 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8,
3019 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 },
3020 .psize = 40,
3021 .digest = { 0x75, 0xd3, 0xc5, 0x24 }
3022 },
3023 {
3024 .key = { 0xff, 0xff, 0xff, 0xff },
3025 .ksize = 4,
3026 .plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
3027 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
3028 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
3029 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
3030 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
3031 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
3032 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
3033 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
3034 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
3035 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
3036 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
3037 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
3038 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
3039 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
3040 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
3041 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80,
3042 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
3043 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
3044 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
3045 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0,
3046 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
3047 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
3048 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
3049 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
3050 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8,
3051 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0,
3052 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
3053 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0,
3054 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8,
3055 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 },
3056 .psize = 240,
3057 .digest = { 0x75, 0xd3, 0xc5, 0x24 },
3058 .np = 2,
3059 .tap = { 31, 209 }
3060 },
3061};
3062
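
These vectors transliterate the removed test_crc32c(): the keyless psize-0 entry checks the default state; the {0x87,0xa9,0xcb,0xed} entry checks that setkey stores the little-endian seed (digest 0x12345678 is the complement of seed 0xedcba987); the 0xff-keyed entries are the per-block checks with the standard ~0 seed; oddball keys such as {0x80,0xea,0xd3,0xf1} are the intermediate CRC states from the old chained-accumulator test; and the final 240-byte entry uses .np/.tap to feed the data as two scatterlist chunks of 31 and 209 bytes. The .np/.tap convention, as assumed from the cipher hunks earlier in this diff:

    int k, temp;

    /* split the plaintext into np segments of tap[k] bytes each */
    for (k = 0, temp = 0; k < tv[i].np; k++) {
            sg_set_buf(&sg[k], &tv[i].plaintext[temp], tv[i].tap[k]);
            temp += tv[i].tap[k];
    }
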
3063/*
 2900 * Cipher speed tests 3064 * Cipher speed tests
2901 */ 3065 */
2902static struct cipher_speed aes_speed_template[] = { 3066static struct cipher_speed aes_speed_template[] = {
@@ -2983,7 +3147,7 @@ static struct cipher_speed des_speed_template[] = {
2983/* 3147/*
2984 * Digest speed tests 3148 * Digest speed tests
2985 */ 3149 */
2986static struct digest_speed generic_digest_speed_template[] = { 3150static struct hash_speed generic_hash_speed_template[] = {
2987 { .blen = 16, .plen = 16, }, 3151 { .blen = 16, .plen = 16, },
2988 { .blen = 64, .plen = 16, }, 3152 { .blen = 64, .plen = 16, },
2989 { .blen = 64, .plen = 64, }, 3153 { .blen = 64, .plen = 64, },
diff --git a/crypto/tea.c b/crypto/tea.c
index 5367adc82fc9..1c54e26fa529 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -46,16 +46,10 @@ struct xtea_ctx {
46}; 46};
47 47
48static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, 48static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
49 unsigned int key_len, u32 *flags) 49 unsigned int key_len)
50{ 50{
51 struct tea_ctx *ctx = crypto_tfm_ctx(tfm); 51 struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
52 const __le32 *key = (const __le32 *)in_key; 52 const __le32 *key = (const __le32 *)in_key;
53
54 if (key_len != 16)
55 {
56 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
57 return -EINVAL;
58 }
59 53
60 ctx->KEY[0] = le32_to_cpu(key[0]); 54 ctx->KEY[0] = le32_to_cpu(key[0]);
61 ctx->KEY[1] = le32_to_cpu(key[1]); 55 ctx->KEY[1] = le32_to_cpu(key[1]);
@@ -125,16 +119,10 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
125} 119}
126 120
127static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, 121static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
128 unsigned int key_len, u32 *flags) 122 unsigned int key_len)
129{ 123{
130 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); 124 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
131 const __le32 *key = (const __le32 *)in_key; 125 const __le32 *key = (const __le32 *)in_key;
132
133 if (key_len != 16)
134 {
135 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
136 return -EINVAL;
137 }
138 126
139 ctx->KEY[0] = le32_to_cpu(key[0]); 127 ctx->KEY[0] = le32_to_cpu(key[0]);
140 ctx->KEY[1] = le32_to_cpu(key[1]); 128 ctx->KEY[1] = le32_to_cpu(key[1]);
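
The u32 *flags out-parameter disappears from setkey across the cipher API. Result flags are now raised on the tfm itself, and a fixed key size such as TEA's 16 bytes is enforced generically from cia_min_keysize/cia_max_keysize before the algorithm's setkey runs, which is why the explicit length check can be deleted outright. A cipher that still needs its own validation would now look roughly like this (hypothetical example):

    static int demo_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
    {
            u32 *flags = &tfm->crt_flags;

            if (key_len % 8) {
                    *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                    return -EINVAL;
            }
            /* ... expand the key into crypto_tfm_ctx(tfm) ... */
            return 0;
    }
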
diff --git a/crypto/twofish.c b/crypto/twofish.c
index ec2488242e2d..4979a2be48a9 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish.c
@@ -39,6 +39,7 @@
39 */ 39 */
40 40
41#include <asm/byteorder.h> 41#include <asm/byteorder.h>
42#include <crypto/twofish.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/init.h> 44#include <linux/init.h>
44#include <linux/types.h> 45#include <linux/types.h>
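
The new <crypto/twofish.h> include signals the other half of this change: the q0/q1 permutations, the MDS tables, and the key schedule move out of this file into a shared twofish_common.c, so the generic C implementation and the i586/x86_64 assembler versions can reuse one key setup. The assumed shape of the shared interface (the real declarations live in the new header):

    struct twofish_ctx {
            u32 s[4][256];          /* key-dependent S-boxes */
            u32 w[8];               /* whitening subkeys */
            u32 k[32];              /* round subkeys */
    };

    int twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
                       unsigned int key_len);
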
@@ -46,534 +47,6 @@
46#include <linux/crypto.h> 47#include <linux/crypto.h>
47#include <linux/bitops.h> 48#include <linux/bitops.h>
48 49
49
50/* The large precomputed tables for the Twofish cipher (twofish.c)
51 * Taken from the same source as twofish.c
52 * Marc Mutz <Marc@Mutz.com>
53 */
54
55/* These two tables are the q0 and q1 permutations, exactly as described in
56 * the Twofish paper. */
57
58static const u8 q0[256] = {
59 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78,
60 0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C,
61 0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30,
62 0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82,
63 0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE,
64 0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B,
65 0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45,
66 0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7,
67 0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF,
68 0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8,
69 0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED,
70 0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90,
71 0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B,
72 0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B,
73 0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F,
74 0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A,
75 0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17,
76 0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72,
77 0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68,
78 0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4,
79 0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42,
80 0x4A, 0x5E, 0xC1, 0xE0
81};
82
83static const u8 q1[256] = {
84 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B,
85 0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1,
86 0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B,
87 0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5,
88 0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54,
89 0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96,
90 0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7,
91 0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8,
92 0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF,
93 0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9,
94 0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D,
95 0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E,
96 0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21,
97 0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01,
98 0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E,
99 0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64,
100 0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44,
101 0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E,
102 0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B,
103 0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9,
104 0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56,
105 0x55, 0x09, 0xBE, 0x91
106};
107
108/* These MDS tables are actually tables of MDS composed with q0 and q1,
109 * because they are only ever used that way and we can save some time by
110 * precomputing. Of course the main saving comes from precomputing the
111 * GF(2^8) multiplication involved in the MDS matrix multiply; by looking
112 * things up in these tables we reduce the matrix multiply to four lookups
113 * and three XORs. Semi-formally, the definition of these tables is:
114 * mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T
115 * mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T
116 * where ^T means "transpose", the matrix multiply is performed in GF(2^8)
117 * represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described
118 * by Schneier et al., and I'm casually glossing over the byte/word
119 * conversion issues. */
120
121static const u32 mds[4][256] = {
122 {0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B,
123 0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B,
124 0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32,
125 0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1,
126 0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA,
127 0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B,
128 0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1,
129 0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5,
130 0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490,
131 0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154,
132 0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0,
133 0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796,
134 0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228,
135 0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7,
136 0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3,
137 0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8,
138 0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477,
139 0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF,
140 0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C,
141 0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9,
142 0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA,
143 0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D,
144 0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72,
145 0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E,
146 0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76,
147 0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321,
148 0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39,
149 0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01,
150 0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D,
151 0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E,
152 0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5,
153 0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64,
154 0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7,
155 0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544,
156 0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E,
157 0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E,
158 0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A,
159 0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B,
160 0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2,
161 0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9,
162 0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504,
163 0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756,
164 0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91},
165
166 {0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252,
167 0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A,
168 0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020,
169 0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141,
170 0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444,
171 0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424,
172 0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A,
173 0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757,
174 0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383,
175 0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A,
176 0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9,
177 0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656,
178 0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1,
179 0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898,
180 0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414,
181 0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3,
182 0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1,
183 0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989,
184 0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5,
185 0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282,
186 0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E,
187 0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E,
188 0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202,
189 0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC,
190 0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565,
191 0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A,
192 0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808,
193 0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272,
194 0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A,
195 0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969,
196 0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505,
197 0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5,
198 0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D,
199 0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343,
200 0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF,
201 0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3,
202 0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F,
203 0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646,
204 0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6,
205 0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF,
206 0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A,
207 0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7,
208 0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8},
209
210 {0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B,
211 0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F,
212 0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A,
213 0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783,
214 0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70,
215 0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3,
216 0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB,
217 0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA,
218 0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4,
219 0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41,
220 0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C,
221 0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07,
222 0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622,
223 0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18,
224 0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035,
225 0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96,
226 0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84,
227 0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E,
228 0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F,
229 0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD,
230 0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558,
231 0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40,
232 0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA,
233 0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85,
234 0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF,
235 0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773,
236 0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D,
237 0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B,
238 0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C,
239 0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19,
240 0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086,
241 0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D,
242 0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74,
243 0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755,
244 0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691,
245 0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D,
246 0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4,
247 0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53,
248 0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E,
249 0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9,
250 0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705,
251 0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7,
252 0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF},
253
254 {0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98,
255 0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866,
256 0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643,
257 0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77,
258 0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9,
259 0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C,
260 0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3,
261 0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216,
262 0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F,
263 0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25,
264 0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF,
265 0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7,
266 0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4,
267 0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E,
268 0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA,
269 0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C,
270 0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12,
271 0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A,
272 0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D,
273 0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE,
274 0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A,
275 0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C,
276 0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B,
277 0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4,
278 0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B,
279 0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3,
280 0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE,
281 0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB,
282 0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85,
283 0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA,
284 0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E,
285 0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8,
286 0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33,
287 0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC,
288 0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718,
289 0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA,
290 0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8,
291 0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872,
292 0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882,
293 0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D,
294 0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10,
295 0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6,
296 0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8}
297};
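The comment above reduces the MDS multiply to "four lookups and three XORs"; written as a function, that step would look roughly like the sketch below. The helper name mds_column_xor is hypothetical, and the four input bytes are assumed to have already passed through the earlier key-dependent q stages, so the final q0/q1 application is the one folded into each mds[k] row.

static u32 mds_column_xor(u8 y0, u8 y1, u8 y2, u8 y3)
{
	/* One MDS matrix-vector multiply in GF(2^8): each mds[k][i]
	 * entry is the k-th MDS column applied to the appropriate
	 * q permutation of i, so the multiply collapses to four
	 * table lookups and three XORs. */
	return mds[0][y0] ^ mds[1][y1] ^ mds[2][y2] ^ mds[3][y3];
}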
298
299/* The exp_to_poly and poly_to_exp tables are used to perform efficient
300 * operations in GF(2^8) represented as GF(2)[x]/w(x) where
301 * w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the
302 * definition of the RS matrix in the key schedule. Elements of that field
304 * are polynomials of degree not greater than 7 with all coefficients 0 or 1,
304 * which can be represented naturally by bytes (just substitute x=2). In that
305 * form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8)
306 * multiplication is inefficient without hardware support. To multiply
307 * faster, I make use of the fact x is a generator for the nonzero elements,
308 * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for
309 * some n in 0..254. Note that that caret is exponentiation in GF(2^8),
310 * *not* polynomial notation. So if I want to compute pq where p and q are
311 * in GF(2^8), I can just say:
312 * 1. if p=0 or q=0 then pq=0
313 * 2. otherwise, find m and n such that p=x^m and q=x^n
314 * 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq
315 * The translations in steps 2 and 3 are looked up in the tables
316 * poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this
317 * in action, look at the CALC_S macro. As additional wrinkles, note that
318 * one of my operands is always a constant, so the poly_to_exp lookup on it
319 * is done in advance; I included the original values in the comments so
320 * readers can have some chance of recognizing that this *is* the RS matrix
321 * from the Twofish paper. I've only included the table entries I actually
322 * need; I never do a lookup on a variable input of zero and the biggest
323 * exponents I'll ever see are 254 (variable) and 237 (constant), so they'll
324 * never sum to more than 491. I'm repeating part of the exp_to_poly table
325 * so that I don't have to do mod-255 reduction in the exponent arithmetic.
326 * Since I know my constant operands are never zero, I only have to worry
327 * about zero values in the variable operand, and I do it with a simple
328 * conditional branch. I know conditionals are expensive, but I couldn't
329 * see a non-horrible way of avoiding them, and I did manage to group the
330 * statements so that each if covers four group multiplications. */
331
332static const u8 poly_to_exp[255] = {
333 0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19,
334 0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A,
335 0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C,
336 0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B,
337 0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47,
338 0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D,
339 0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8,
340 0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C,
341 0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83,
342 0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48,
343 0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26,
344 0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E,
345 0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3,
346 0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9,
347 0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A,
348 0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D,
349 0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75,
350 0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84,
351 0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64,
352 0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49,
353 0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF,
354 0x85, 0xC8, 0xA1
355};
356
357static const u8 exp_to_poly[492] = {
358 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2,
359 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03,
360 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6,
361 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A,
362 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63,
363 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C,
364 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07,
365 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88,
366 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12,
367 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7,
368 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C,
369 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8,
370 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25,
371 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A,
372 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE,
373 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC,
374 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E,
375 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92,
376 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89,
377 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB,
378 0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1,
379 0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D,
380 0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC,
381 0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3,
382 0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52,
383 0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0,
384 0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1,
385 0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A,
386 0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11,
387 0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51,
388 0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66,
389 0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB,
390 0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19,
391 0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D,
392 0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56,
393 0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE,
394 0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9,
395 0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE,
396 0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41,
397 0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E,
398 0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB
399};
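To make the log/antilog trick concrete: multiplying a variable field element by one of the RS constants, as CALC_S does below, amounts to the following sketch. The name gf_mul_by_const is hypothetical; log_w stands for the precomputed poly_to_exp value of the constant, i.e. the w, x, y, z arguments passed to CALC_S.

static u8 gf_mul_by_const(u8 p, u8 log_w)
{
	if (p == 0)	/* zero has no logarithm; 0 * q = 0 */
		return 0;
	/* p = x^m with m = poly_to_exp[p - 1], so the product is
	 * x^(m + log_w); the repeated tail of exp_to_poly absorbs
	 * the mod-255 reduction, as explained above. */
	return exp_to_poly[poly_to_exp[p - 1] + log_w];
}

CALC_S performs this once per output byte, which is why each of its four XOR lines indexes exp_to_poly at tmp plus a different constant exponent.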
400
401
402/* The table constants are indices of
403 * S-box entries, preprocessed through q0 and q1. */
404static const u8 calc_sb_tbl[512] = {
405 0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4,
406 0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8,
407 0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B,
408 0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B,
409 0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD,
410 0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1,
411 0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B,
412 0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F,
413 0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B,
414 0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D,
415 0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E,
416 0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5,
417 0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14,
418 0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3,
419 0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54,
420 0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51,
421 0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A,
422 0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96,
423 0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10,
424 0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C,
425 0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7,
426 0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70,
427 0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB,
428 0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8,
429 0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF,
430 0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC,
431 0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF,
432 0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2,
433 0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82,
434 0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9,
435 0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97,
436 0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17,
437 0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D,
438 0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3,
439 0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C,
440 0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E,
441 0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F,
442 0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49,
443 0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21,
444 0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9,
445 0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD,
446 0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01,
447 0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F,
448 0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48,
449 0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E,
450 0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19,
451 0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57,
452 0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64,
453 0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE,
454 0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5,
455 0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44,
456 0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69,
457 0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15,
458 0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E,
459 0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34,
460 0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC,
461 0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B,
462 0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB,
463 0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52,
464 0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9,
465 0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4,
466 0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2,
467 0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56,
468 0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91
469};
470
471/* Macro to perform one column of the RS matrix multiplication. The
472 * parameters a, b, c, and d are the four bytes of output; i is the index
473 * of the key bytes, and w, x, y, and z, are the column of constants from
474 * the RS matrix, preprocessed through the poly_to_exp table. */
475
476#define CALC_S(a, b, c, d, i, w, x, y, z) \
477 if (key[i]) { \
478 tmp = poly_to_exp[key[i] - 1]; \
479 (a) ^= exp_to_poly[tmp + (w)]; \
480 (b) ^= exp_to_poly[tmp + (x)]; \
481 (c) ^= exp_to_poly[tmp + (y)]; \
482 (d) ^= exp_to_poly[tmp + (z)]; \
483 }
484
485/* Macros to calculate the key-dependent S-boxes for a 128-bit key using
486 * the S vector from CALC_S. CALC_SB_2 computes a single entry in all
487 * four S-boxes, where i is the index of the entry to compute, and a and b
488 * are the index numbers preprocessed through the q0 and q1 tables
489 * respectively. */
490
491#define CALC_SB_2(i, a, b) \
492 ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
493 ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
494 ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
495 ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
496
497/* Macro exactly like CALC_SB_2, but for 192-bit keys. */
498
499#define CALC_SB192_2(i, a, b) \
500 ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
501 ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
502 ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
503 ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl];
504
505/* Macro exactly like CALC_SB_2, but for 256-bit keys. */
506
507#define CALC_SB256_2(i, a, b) \
508 ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
509 ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \
510 ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
511 ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp];
512
513/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the
514 * last two stages of the h() function for a given index (either 2i or 2i+1).
515 * a, b, c, and d are the four bytes going into the last two stages. For
516 * 128-bit keys, this is the entire h() function and a and c are the index
517 * preprocessed through q0 and q1 respectively; for longer keys they are the
518 * output of previous stages. j is the index of the first key byte to use.
519 * CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2
520 * twice, doing the Pseudo-Hadamard Transform, and doing the necessary
521 * rotations. Its parameters are: a, the array to write the results into,
522 * j, the index of the first output entry, k and l, the preprocessed indices
523 * for index 2i, and m and n, the preprocessed indices for index 2i+1.
524 * CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an
525 * additional lookup-and-XOR stage. The parameters a, b, c and d are the
526 * four bytes going into the last three stages. For 192-bit keys, c = d
527 * are the index preprocessed through q0, and a = b are the index
528 * preprocessed through q1; j is the index of the first key byte to use.
529 * CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro
530 * instead of CALC_K_2.
531 * CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an
532 * additional lookup-and-XOR stage. The parameters a and b are the index
533 * preprocessed through q0 and q1 respectively; j is the index of the first
534 * key byte to use. CALC_K256 is identical to CALC_K but for using the
535 * CALC_K256_2 macro instead of CALC_K_2. */
536
537#define CALC_K_2(a, b, c, d, j) \
538 mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \
539 ^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \
540 ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \
541 ^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]]
542
543#define CALC_K(a, j, k, l, m, n) \
544 x = CALC_K_2 (k, l, k, l, 0); \
545 y = CALC_K_2 (m, n, m, n, 4); \
546 y = rol32(y, 8); \
547 x += y; y += x; ctx->a[j] = x; \
548 ctx->a[(j) + 1] = rol32(y, 9)
549
550#define CALC_K192_2(a, b, c, d, j) \
551 CALC_K_2 (q0[a ^ key[(j) + 16]], \
552 q1[b ^ key[(j) + 17]], \
553 q0[c ^ key[(j) + 18]], \
554 q1[d ^ key[(j) + 19]], j)
555
556#define CALC_K192(a, j, k, l, m, n) \
557 x = CALC_K192_2 (l, l, k, k, 0); \
558 y = CALC_K192_2 (n, n, m, m, 4); \
559 y = rol32(y, 8); \
560 x += y; y += x; ctx->a[j] = x; \
561 ctx->a[(j) + 1] = rol32(y, 9)
562
563#define CALC_K256_2(a, b, j) \
564 CALC_K192_2 (q1[b ^ key[(j) + 24]], \
565 q1[a ^ key[(j) + 25]], \
566 q0[a ^ key[(j) + 26]], \
567 q0[b ^ key[(j) + 27]], j)
568
569#define CALC_K256(a, j, k, l, m, n) \
570 x = CALC_K256_2 (k, l, 0); \
571 y = CALC_K256_2 (m, n, 4); \
572 y = rol32(y, 8); \
573 x += y; y += x; ctx->a[j] = x; \
574 ctx->a[(j) + 1] = rol32(y, 9)
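All three CALC_K variants share the same tail: rotate the odd h() output by 8 bits, apply the Pseudo-Hadamard Transform, and rotate the odd subkey by 9 bits. A sketch of that tail as a function (twofish_pht_pair is a hypothetical name, used only to make the x/y shuffling in the macros explicit):

static inline void twofish_pht_pair(u32 a, u32 b, u32 *k_even, u32 *k_odd)
{
	b = rol32(b, 8);	/* B = rol32(h_odd, 8) */
	a += b;			/* PHT: K[2i] = A + B */
	b += a;			/* b now holds A + 2B */
	*k_even = a;
	*k_odd = rol32(b, 9);	/* K[2i+1] = rol32(A + 2B, 9) */
}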
575
576
577/* Macros to compute the g() function in the encryption and decryption 50/* Macros to compute the g() function in the encryption and decryption
578 * rounds. G1 is the straight g() function; G2 includes the 8-bit 51 * rounds. G1 is the straight g() function; G2 includes the 8-bit
579 * rotation for the high 32-bit word. */ 52 * rotation for the high 32-bit word. */
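The G1/G2 macro bodies fall outside this hunk, but with the key-dependent S-boxes pre-composed with the MDS matrix in ctx->s[][], the straight g() function reduces to the same four-lookup pattern used in the key schedule. A sketch of what G1 amounts to (g1 is a hypothetical name for illustration):

static inline u32 g1(const struct twofish_ctx *ctx, u32 x)
{
	return ctx->s[0][x & 0xFF] ^
	       ctx->s[1][(x >> 8) & 0xFF] ^
	       ctx->s[2][(x >> 16) & 0xFF] ^
	       ctx->s[3][(x >> 24) & 0xFF];
}

G2 would index the S-boxes rotated by one position, which implements the 8-bit rotation of the high word mentioned in the comment above.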
@@ -630,176 +103,7 @@ static const u8 calc_sb_tbl[512] = {
630 x ^= ctx->w[m]; \ 103 x ^= ctx->w[m]; \
631 dst[n] = cpu_to_le32(x) 104 dst[n] = cpu_to_le32(x)
632 105
633#define TF_MIN_KEY_SIZE 16
634#define TF_MAX_KEY_SIZE 32
635#define TF_BLOCK_SIZE 16
636
637/* Structure for an expanded Twofish key. s contains the key-dependent
638 * S-boxes composed with the MDS matrix; w contains the eight "whitening"
639 * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note
640 * that k[i] corresponds to what the Twofish paper calls K[i+8]. */
641struct twofish_ctx {
642 u32 s[4][256], w[8], k[32];
643};
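For scale: s alone is 4 * 256 * 4 = 4096 bytes, w adds 32 and k adds 128, so each expanded key occupies just over 4 KB of context.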
644
645/* Perform the key setup. */
646static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
647 unsigned int key_len, u32 *flags)
648{
649
650 struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
651 106
652 int i, j, k;
653
654 /* Temporaries for CALC_K. */
655 u32 x, y;
656
657 /* The S vector used to key the S-boxes, split up into individual bytes.
658 * 128-bit keys use only sa through sh; 256-bit use all of them. */
659 u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
660 u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;
661
662 /* Temporary for CALC_S. */
663 u8 tmp;
664
665 /* Check key length. */
666 if (key_len != 16 && key_len != 24 && key_len != 32)
667 {
668 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
669 return -EINVAL; /* unsupported key length */
670 }
671
672 /* Compute the first two words of the S vector. The magic numbers are
673 * the entries of the RS matrix, preprocessed through poly_to_exp. The
674 * numbers in the comments are the original (polynomial form) matrix
675 * entries. */
676 CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
677 CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
678 CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
679 CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
680 CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
681 CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
682 CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
683 CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
684 CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
685 CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
686 CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
687 CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
688 CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
689 CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
690 CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
691 CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
692
693 if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */
694 /* Calculate the third word of the S vector */
695 CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
696 CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
697 CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
698 CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
699 CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
700 CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
701 CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
702 CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
703 }
704
705 if (key_len == 32) { /* 256-bit key */
706 /* Calculate the fourth word of the S vector */
707 CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
708 CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
709 CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
710 CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
711 CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
712 CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
713 CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
714 CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
715
716 /* Compute the S-boxes. */
717 for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
718 CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
719 }
720
721 /* Calculate whitening and round subkeys. The constants are
722 * indices of subkeys, preprocessed through q0 and q1. */
723 CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
724 CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
725 CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
726 CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
727 CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
728 CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
729 CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
730 CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
731 CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
732 CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
733 CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
734 CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
735 CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
736 CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
737 CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
738 CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
739 CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
740 CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
741 CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
742 CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
743 } else if (key_len == 24) { /* 192-bit key */
744 /* Compute the S-boxes. */
745 for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
746 CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
747 }
748
749 /* Calculate whitening and round subkeys. The constants are
750 * indices of subkeys, preprocessed through q0 and q1. */
751 CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
752 CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
753 CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
754 CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
755 CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
756 CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
757 CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
758 CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
759 CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
760 CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
761 CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
762 CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
763 CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
764 CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
765 CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
766 CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
767 CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
768 CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
769 CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
770 CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
771 } else { /* 128-bit key */
772 /* Compute the S-boxes. */
773 for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
774 CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
775 }
776
777 /* Calculate whitening and round subkeys. The constants are
778 * indices of subkeys, preprocessed through q0 and q1. */
779 CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
780 CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
781 CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
782 CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
783 CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
784 CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
785 CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
786 CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
787 CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
788 CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
789 CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
790 CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
791 CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
792 CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
793 CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
794 CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
795 CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
796 CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
797 CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
798 CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
799 }
800
801 return 0;
802}
803 107
804/* Encrypt one block. in and out may be the same. */ 108/* Encrypt one block. in and out may be the same. */
805static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 109static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -877,6 +181,8 @@ static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
877 181
878static struct crypto_alg alg = { 182static struct crypto_alg alg = {
879 .cra_name = "twofish", 183 .cra_name = "twofish",
184 .cra_driver_name = "twofish-generic",
185 .cra_priority = 100,
880 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 186 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
881 .cra_blocksize = TF_BLOCK_SIZE, 187 .cra_blocksize = TF_BLOCK_SIZE,
882 .cra_ctxsize = sizeof(struct twofish_ctx), 188 .cra_ctxsize = sizeof(struct twofish_ctx),
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
new file mode 100644
index 000000000000..b4b9c0c3f4ae
--- /dev/null
+++ b/crypto/twofish_common.c
@@ -0,0 +1,744 @@
1/*
2 * Common Twofish algorithm parts shared between the C and assembler
3 * implementations
4 *
5 * Originally Twofish for GPG
6 * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
7 * 256-bit key length added March 20, 1999
8 * Some modifications to reduce the text size by Werner Koch, April, 1998
9 * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
10 * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
11 *
12 * The original author has disclaimed all copyright interest in this
13 * code and thus put it in the public domain. The subsequent authors
14 * have put this under the GNU General Public License.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 * USA
30 *
31 * This code is a "clean room" implementation, written from the paper
32 * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
33 * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
34 * through http://www.counterpane.com/twofish.html
35 *
36 * For background information on multiplication in finite fields, used for
37 * the matrix operations in the key schedule, see the book _Contemporary
38 * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
39 * Third Edition.
40 */
41
42#include <crypto/twofish.h>
43#include <linux/bitops.h>
44#include <linux/crypto.h>
45#include <linux/errno.h>
46#include <linux/init.h>
47#include <linux/kernel.h>
48#include <linux/module.h>
49#include <linux/types.h>
50
51
52/* The large precomputed tables for the Twofish cipher (twofish.c)
53 * Taken from the same source as twofish.c
54 * Marc Mutz <Marc@Mutz.com>
55 */
56
57/* These two tables are the q0 and q1 permutations, exactly as described in
58 * the Twofish paper. */
59
60static const u8 q0[256] = {
61 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78,
62 0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C,
63 0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30,
64 0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82,
65 0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE,
66 0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B,
67 0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45,
68 0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7,
69 0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF,
70 0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8,
71 0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED,
72 0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90,
73 0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B,
74 0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B,
75 0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F,
76 0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A,
77 0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17,
78 0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72,
79 0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68,
80 0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4,
81 0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42,
82 0x4A, 0x5E, 0xC1, 0xE0
83};
84
85static const u8 q1[256] = {
86 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B,
87 0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1,
88 0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B,
89 0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5,
90 0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54,
91 0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96,
92 0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7,
93 0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8,
94 0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF,
95 0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9,
96 0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D,
97 0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E,
98 0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21,
99 0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01,
100 0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E,
101 0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64,
102 0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44,
103 0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E,
104 0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B,
105 0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9,
106 0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56,
107 0x55, 0x09, 0xBE, 0x91
108};
109
110/* These MDS tables are actually tables of MDS composed with q0 and q1,
111 * because they are only ever used that way and we can save some time by
112 * precomputing. Of course the main saving comes from precomputing the
113 * GF(2^8) multiplication involved in the MDS matrix multiply; by looking
114 * things up in these tables we reduce the matrix multiply to four lookups
115 * and three XORs. Semi-formally, the definition of these tables is:
116 * mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T
117 * mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T
118 * where ^T means "transpose", the matrix multiply is performed in GF(2^8)
119 * represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described
120 * by Schneier et al., and I'm casually glossing over the byte/word
121 * conversion issues. */
122
123static const u32 mds[4][256] = {
124 {
125 0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B,
126 0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B,
127 0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32,
128 0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1,
129 0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA,
130 0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B,
131 0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1,
132 0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5,
133 0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490,
134 0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154,
135 0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0,
136 0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796,
137 0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228,
138 0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7,
139 0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3,
140 0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8,
141 0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477,
142 0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF,
143 0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C,
144 0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9,
145 0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA,
146 0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D,
147 0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72,
148 0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E,
149 0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76,
150 0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321,
151 0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39,
152 0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01,
153 0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D,
154 0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E,
155 0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5,
156 0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64,
157 0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7,
158 0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544,
159 0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E,
160 0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E,
161 0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A,
162 0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B,
163 0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2,
164 0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9,
165 0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504,
166 0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756,
167 0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91},
168
169 {
170 0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252,
171 0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A,
172 0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020,
173 0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141,
174 0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444,
175 0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424,
176 0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A,
177 0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757,
178 0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383,
179 0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A,
180 0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9,
181 0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656,
182 0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1,
183 0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898,
184 0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414,
185 0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3,
186 0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1,
187 0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989,
188 0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5,
189 0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282,
190 0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E,
191 0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E,
192 0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202,
193 0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC,
194 0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565,
195 0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A,
196 0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808,
197 0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272,
198 0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A,
199 0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969,
200 0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505,
201 0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5,
202 0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D,
203 0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343,
204 0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF,
205 0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3,
206 0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F,
207 0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646,
208 0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6,
209 0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF,
210 0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A,
211 0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7,
212 0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8},
213
214 {
215 0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B,
216 0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F,
217 0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A,
218 0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783,
219 0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70,
220 0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3,
221 0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB,
222 0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA,
223 0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4,
224 0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41,
225 0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C,
226 0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07,
227 0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622,
228 0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18,
229 0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035,
230 0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96,
231 0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84,
232 0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E,
233 0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F,
234 0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD,
235 0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558,
236 0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40,
237 0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA,
238 0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85,
239 0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF,
240 0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773,
241 0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D,
242 0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B,
243 0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C,
244 0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19,
245 0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086,
246 0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D,
247 0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74,
248 0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755,
249 0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691,
250 0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D,
251 0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4,
252 0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53,
253 0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E,
254 0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9,
255 0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705,
256 0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7,
257 0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF},
258
259 {
260 0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98,
261 0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866,
262 0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643,
263 0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77,
264 0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9,
265 0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C,
266 0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3,
267 0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216,
268 0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F,
269 0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25,
270 0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF,
271 0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7,
272 0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4,
273 0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E,
274 0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA,
275 0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C,
276 0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12,
277 0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A,
278 0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D,
279 0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE,
280 0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A,
281 0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C,
282 0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B,
283 0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4,
284 0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B,
285 0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3,
286 0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE,
287 0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB,
288 0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85,
289 0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA,
290 0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E,
291 0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8,
292 0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33,
293 0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC,
294 0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718,
295 0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA,
296 0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8,
297 0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872,
298 0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882,
299 0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D,
300 0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10,
301 0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6,
302 0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8}
303};
304
305/* The exp_to_poly and poly_to_exp tables are used to perform efficient
306 * operations in GF(2^8) represented as GF(2)[x]/w(x) where
307 * w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the
308 * definition of the RS matrix in the key schedule. Elements of that field
309 * are polynomials of degree not greater than 7 and all coefficients 0 or 1,
310 * which can be represented naturally by bytes (just substitute x=2). In that
311 * form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8)
312 * multiplication is inefficient without hardware support. To multiply
313 * faster, I make use of the fact x is a generator for the nonzero elements,
314 * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for
315 * some n in 0..254. Note that that caret is exponentiation in GF(2^8),
316 * *not* polynomial notation. So if I want to compute pq where p and q are
317 * in GF(2^8), I can just say:
318 * 1. if p=0 or q=0 then pq=0
319 * 2. otherwise, find m and n such that p=x^m and q=x^n
320 * 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq
321 * The translations in steps 2 and 3 are looked up in the tables
322 * poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this
323 * in action, look at the CALC_S macro. As additional wrinkles, note that
324 * one of my operands is always a constant, so the poly_to_exp lookup on it
325 * is done in advance; I included the original values in the comments so
326 * readers can have some chance of recognizing that this *is* the RS matrix
327 * from the Twofish paper. I've only included the table entries I actually
328 * need; I never do a lookup on a variable input of zero and the biggest
329 * exponents I'll ever see are 254 (variable) and 237 (constant), so they'll
330 * never sum to more than 491. I'm repeating part of the exp_to_poly table
331 * so that I don't have to do mod-255 reduction in the exponent arithmetic.
332 * Since I know my constant operands are never zero, I only have to worry
333 * about zero values in the variable operand, and I do it with a simple
334 * conditional branch. I know conditionals are expensive, but I couldn't
335 * see a non-horrible way of avoiding them, and I did manage to group the
336 * statements so that each if covers four group multiplications. */
337
338static const u8 poly_to_exp[255] = {
339 0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19,
340 0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A,
341 0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C,
342 0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B,
343 0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47,
344 0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D,
345 0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8,
346 0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C,
347 0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83,
348 0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48,
349 0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26,
350 0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E,
351 0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3,
352 0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9,
353 0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A,
354 0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D,
355 0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75,
356 0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84,
357 0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64,
358 0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49,
359 0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF,
360 0x85, 0xC8, 0xA1
361};
362
363static const u8 exp_to_poly[492] = {
364 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2,
365 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03,
366 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6,
367 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A,
368 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63,
369 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C,
370 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07,
371 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88,
372 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12,
373 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7,
374 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C,
375 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8,
376 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25,
377 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A,
378 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE,
379 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC,
380 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E,
381 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92,
382 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89,
383 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB,
384 0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1,
385 0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D,
386 0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC,
387 0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3,
388 0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52,
389 0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0,
390 0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1,
391 0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A,
392 0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11,
393 0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51,
394 0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66,
395 0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB,
396 0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19,
397 0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D,
398 0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56,
399 0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE,
400 0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9,
401 0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE,
402 0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41,
403 0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E,
404 0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB
405};
406
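/* Illustrative sketch, not part of the original file: the multiplication
 * rule described in the comment above, written out with the two tables.
 * The helper name gf_mul is hypothetical; the key schedule inlines this
 * logic in CALC_S instead, and skips the mod-255 reduction by oversizing
 * exp_to_poly for its largest constant exponent. */
static inline u8 gf_mul(u8 p, u8 q)
{
	if (p == 0 || q == 0)
		return 0;	/* zero has no discrete logarithm */
	/* p = x^m, q = x^n, hence pq = x^((m + n) mod 255) */
	return exp_to_poly[(poly_to_exp[p - 1] + poly_to_exp[q - 1]) % 255];
}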
407
408/* The table constants are indices of
409 * S-box entries, preprocessed through q0 and q1. */
410static const u8 calc_sb_tbl[512] = {
411 0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4,
412 0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8,
413 0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B,
414 0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B,
415 0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD,
416 0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1,
417 0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B,
418 0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F,
419 0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B,
420 0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D,
421 0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E,
422 0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5,
423 0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14,
424 0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3,
425 0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54,
426 0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51,
427 0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A,
428 0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96,
429 0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10,
430 0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C,
431 0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7,
432 0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70,
433 0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB,
434 0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8,
435 0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF,
436 0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC,
437 0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF,
438 0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2,
439 0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82,
440 0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9,
441 0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97,
442 0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17,
443 0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D,
444 0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3,
445 0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C,
446 0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E,
447 0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F,
448 0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49,
449 0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21,
450 0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9,
451 0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD,
452 0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01,
453 0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F,
454 0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48,
455 0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E,
456 0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19,
457 0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57,
458 0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64,
459 0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE,
460 0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5,
461 0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44,
462 0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69,
463 0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15,
464 0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E,
465 0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34,
466 0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC,
467 0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B,
468 0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB,
469 0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52,
470 0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9,
471 0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4,
472 0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2,
473 0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56,
474 0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91
475};
476
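/* Illustrative note, not part of the original file: the table interleaves
 * the two fixed Twofish permutations, so calc_sb_tbl[2*i] == q0(i) and
 * calc_sb_tbl[2*i + 1] == q1(i); e.g. q0(0) = 0xA9 and q1(0) = 0x75 are
 * the first two bytes above. The S-box loops below walk it with j = 2*i
 * and k = 2*i + 1. */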
477/* Macro to perform one column of the RS matrix multiplication. The
478 * parameters a, b, c, and d are the four bytes of output; i is the index
479 * of the key bytes, and w, x, y, and z, are the column of constants from
480 * the RS matrix, preprocessed through the poly_to_exp table. */
481
482#define CALC_S(a, b, c, d, i, w, x, y, z) \
483 if (key[i]) { \
484 tmp = poly_to_exp[key[i] - 1]; \
485 (a) ^= exp_to_poly[tmp + (w)]; \
486 (b) ^= exp_to_poly[tmp + (x)]; \
487 (c) ^= exp_to_poly[tmp + (y)]; \
488 (d) ^= exp_to_poly[tmp + (z)]; \
489 }
490
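/* Illustrative sketch, not part of the original file: one CALC_S call is
 * equivalent to XOR-accumulating one key byte times one RS-matrix column
 * in plain polynomial form, e.g. with the hypothetical gf_mul() above:
 *
 *	sa ^= gf_mul(key[i], 0x01);	w = poly_to_exp[0x01 - 1] = 0x00
 *	sb ^= gf_mul(key[i], 0xA4);	x = poly_to_exp[0xA4 - 1] = 0x2D
 *	sc ^= gf_mul(key[i], 0x02);	y = poly_to_exp[0x02 - 1] = 0x01
 *	sd ^= gf_mul(key[i], 0xA4);	z = poly_to_exp[0xA4 - 1] = 0x2D
 *
 * which is why the first CALC_S call below passes (0x00, 0x2D, 0x01, 0x2D)
 * for the RS entries 01 A4 02 A4 quoted in its comment. */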
491/* Macros to calculate the key-dependent S-boxes for a 128-bit key using
492 * the S vector from CALC_S. CALC_SB_2 computes a single entry in all
493 * four S-boxes, where i is the index of the entry to compute, and a and b
494 * are the index numbers preprocessed through the q0 and q1 tables
495 * respectively. */
496
497#define CALC_SB_2(i, a, b) \
498 ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
499 ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
500 ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
501 ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
502
503/* Macro exactly like CALC_SB_2, but for 192-bit keys. */
504
505#define CALC_SB192_2(i, a, b) \
506 ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
507 ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
508 ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
509 ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl];
510
511/* Macro exactly like CALC_SB_2, but for 256-bit keys. */
512
513#define CALC_SB256_2(i, a, b) \
514 ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
515 ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \
516 ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
517 ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp];
518
519/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the
520 * last two stages of the h() function for a given index (either 2i or 2i+1).
521 * a, b, c, and d are the four bytes going into the last two stages. For
522 * 128-bit keys, this is the entire h() function and a and c are the index
523 * preprocessed through q0 and q1 respectively; for longer keys they are the
524 * output of previous stages. j is the index of the first key byte to use.
525 * CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2
526 * twice, doing the Pseudo-Hadamard Transform, and doing the necessary
527 * rotations. Its parameters are: a, the array to write the results into,
528 * j, the index of the first output entry, k and l, the preprocessed indices
529 * for index 2i, and m and n, the preprocessed indices for index 2i+1.
530 * CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an
531 * additional lookup-and-XOR stage. The parameters a, b, c and d are the
532 * four bytes going into the last three stages. For 192-bit keys, c = d
533 * are the index preprocessed through q0, and a = b are the index
534 * preprocessed through q1; j is the index of the first key byte to use.
535 * CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro
536 * instead of CALC_K_2.
537 * CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an
538 * additional lookup-and-XOR stage. The parameters a and b are the index
539 * preprocessed through q0 and q1 respectively; j is the index of the first
540 * key byte to use. CALC_K256 is identical to CALC_K but for using the
541 * CALC_K256_2 macro instead of CALC_K_2. */
542
543#define CALC_K_2(a, b, c, d, j) \
544 mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \
545 ^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \
546 ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \
547 ^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]]
548
549#define CALC_K(a, j, k, l, m, n) \
550 x = CALC_K_2 (k, l, k, l, 0); \
551 y = CALC_K_2 (m, n, m, n, 4); \
552 y = rol32(y, 8); \
553 x += y; y += x; ctx->a[j] = x; \
554 ctx->a[(j) + 1] = rol32(y, 9)
555
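/* Illustrative note, not part of the original file: after "x += y; y += x"
 * in CALC_K above, the pair (x, y) holds the Pseudo-Hadamard Transform
 * (A, B) -> (A + B, A + 2B) of the two h() outputs, so the stored subkeys
 * match the Twofish paper's K[2i] = A + B and K[2i+1] = (A + 2B) <<< 9,
 * with the paper's <<< 8 on B already applied by rol32(y, 8). */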
556#define CALC_K192_2(a, b, c, d, j) \
557 CALC_K_2 (q0[a ^ key[(j) + 16]], \
558 q1[b ^ key[(j) + 17]], \
559 q0[c ^ key[(j) + 18]], \
560 q1[d ^ key[(j) + 19]], j)
561
562#define CALC_K192(a, j, k, l, m, n) \
563 x = CALC_K192_2 (l, l, k, k, 0); \
564 y = CALC_K192_2 (n, n, m, m, 4); \
565 y = rol32(y, 8); \
566 x += y; y += x; ctx->a[j] = x; \
567 ctx->a[(j) + 1] = rol32(y, 9)
568
569#define CALC_K256_2(a, b, j) \
570 CALC_K192_2 (q1[b ^ key[(j) + 24]], \
571 q1[a ^ key[(j) + 25]], \
572 q0[a ^ key[(j) + 26]], \
573 q0[b ^ key[(j) + 27]], j)
574
575#define CALC_K256(a, j, k, l, m, n) \
576 x = CALC_K256_2 (k, l, 0); \
577 y = CALC_K256_2 (m, n, 4); \
578 y = rol32(y, 8); \
579 x += y; y += x; ctx->a[j] = x; \
580 ctx->a[(j) + 1] = rol32(y, 9)
581
582/* Perform the key setup. */
583int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
584{
585
586 struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
587 u32 *flags = &tfm->crt_flags;
588
589 int i, j, k;
590
591 /* Temporaries for CALC_K. */
592 u32 x, y;
593
594 /* The S vector used to key the S-boxes, split up into individual bytes.
595 * 128-bit keys use only sa through sh; 256-bit use all of them. */
596 u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
597 u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;
598
599 /* Temporary for CALC_S. */
600 u8 tmp;
601
602 /* Check key length. */
603 if (key_len % 8)
604 {
605 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
606 return -EINVAL; /* unsupported key length */
607 }
608
609 /* Compute the first two words of the S vector. The magic numbers are
610 * the entries of the RS matrix, preprocessed through poly_to_exp. The
611 * numbers in the comments are the original (polynomial form) matrix
612 * entries. */
613 CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
614 CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
615 CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
616 CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
617 CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
618 CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
619 CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
620 CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
621 CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
622 CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
623 CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
624 CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
625 CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
626 CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
627 CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
628 CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
629
630 if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */
631 /* Calculate the third word of the S vector */
632 CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
633 CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
634 CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
635 CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
636 CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
637 CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
638 CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
639 CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
640 }
641
642 if (key_len == 32) { /* 256-bit key */
643 /* Calculate the fourth word of the S vector */
644 CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
645 CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
646 CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
647 CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
648 CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
649 CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
650 CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
651 CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
652
653 /* Compute the S-boxes. */
654 for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
655 CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
656 }
657
658 /* Calculate whitening and round subkeys. The constants are
659 * indices of subkeys, preprocessed through q0 and q1. */
660 CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
661 CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
662 CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
663 CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
664 CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
665 CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
666 CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
667 CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
668 CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
669 CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
670 CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
671 CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
672 CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
673 CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
674 CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
675 CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
676 CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
677 CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
678 CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
679 CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
680 } else if (key_len == 24) { /* 192-bit key */
681 /* Compute the S-boxes. */
682 for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
683 CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
684 }
685
686 /* Calculate whitening and round subkeys. The constants are
687 * indices of subkeys, preprocessed through q0 and q1. */
688 CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
689 CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
690 CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
691 CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
692 CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
693 CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
694 CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
695 CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
696 CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
697 CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
698 CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
699 CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
700 CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
701 CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
702 CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
703 CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
704 CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
705 CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
706 CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
707 CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
708 } else { /* 128-bit key */
709 /* Compute the S-boxes. */
710 for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
711 CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
712 }
713
714 /* Calculate whitening and round subkeys. The constants are
715 * indices of subkeys, preprocessed through q0 and q1. */
716 CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
717 CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
718 CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
719 CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
720 CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
721 CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
722 CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
723 CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
724 CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
725 CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
726 CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
727 CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
728 CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
729 CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
730 CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
731 CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
732 CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
733 CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
734 CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
735 CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
736 }
737
738 return 0;
739}
740
741EXPORT_SYMBOL_GPL(twofish_setkey);
742
743MODULE_LICENSE("GPL");
744MODULE_DESCRIPTION("Twofish cipher common functions");
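A usage sketch for the file above (hedged: twofish_setkey() is never called
directly, only reached through the crypto API; the helper names below follow
the API as reworked by this patch series and are assumptions, not part of
the commit):

	struct crypto_cipher *tfm = crypto_alloc_cipher("twofish", 0, 0);

	if (!IS_ERR(tfm)) {
		/* key may be 16, 24 or 32 bytes long */
		err = crypto_cipher_setkey(tfm, key, 16);
		...
		crypto_free_cipher(tfm);
	}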
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 3d4261c39f16..40535036e893 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -40,11 +40,13 @@ static int
 cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 {
 	int err = -EINVAL;
+	int cipher_len;
+	int mode_len;
 	char cms[LO_NAME_SIZE];		/* cipher-mode string */
 	char *cipher;
 	char *mode;
 	char *cmsp = cms;		/* c-m string pointer */
-	struct crypto_tfm *tfm = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* encryption breaks for non sector aligned offsets */
 
@@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 
 	strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
 	cms[LO_NAME_SIZE - 1] = 0;
-	cipher = strsep(&cmsp, "-");
-	mode = strsep(&cmsp, "-");
-
-	if (mode == NULL || strcmp(mode, "cbc") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
-				       CRYPTO_TFM_REQ_MAY_SLEEP);
-	else if (strcmp(mode, "ecb") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
-				       CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (tfm == NULL)
+
+	cipher = cmsp;
+	cipher_len = strcspn(cmsp, "-");
+
+	mode = cmsp + cipher_len;
+	mode_len = 0;
+	if (*mode) {
+		mode++;
+		mode_len = strcspn(mode, "-");
+	}
+
+	if (!mode_len) {
+		mode = "cbc";
+		mode_len = 3;
+	}
+
+	if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
 		return -EINVAL;
 
-	err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
-					   info->lo_encrypt_key_size);
+	memmove(cms, mode, mode_len);
+	cmsp = cms + mode_len;
+	*cmsp++ = '(';
+	memcpy(cmsp, info->lo_crypt_name, cipher_len);
+	cmsp += cipher_len;
+	*cmsp++ = ')';
+	*cmsp = 0;
+
+	tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
+				      info->lo_encrypt_key_size);
 
 	if (err != 0)
 		goto out_free_tfm;
@@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	return 0;
 
 out_free_tfm:
-	crypto_free_tfm(tfm);
+	crypto_free_blkcipher(tfm);
 
 out:
 	return err;
 }
 
 
-typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
+typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
 			struct scatterlist *sg_out,
 			struct scatterlist *sg_in,
 			unsigned int nsg);
 
-
-static int
-cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
-			struct page *raw_page, unsigned raw_off,
-			struct page *loop_page, unsigned loop_off,
-			int size, sector_t IV)
-{
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
-	struct scatterlist sg_out = { NULL, };
-	struct scatterlist sg_in = { NULL, };
-
-	encdec_ecb_t encdecfunc;
-	struct page *in_page, *out_page;
-	unsigned in_offs, out_offs;
-
-	if (cmd == READ) {
-		in_page = raw_page;
-		in_offs = raw_off;
-		out_page = loop_page;
-		out_offs = loop_off;
-		encdecfunc = tfm->crt_u.cipher.cit_decrypt;
-	} else {
-		in_page = loop_page;
-		in_offs = loop_off;
-		out_page = raw_page;
-		out_offs = raw_off;
-		encdecfunc = tfm->crt_u.cipher.cit_encrypt;
-	}
-
-	while (size > 0) {
-		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
-
-		sg_in.page = in_page;
-		sg_in.offset = in_offs;
-		sg_in.length = sz;
-
-		sg_out.page = out_page;
-		sg_out.offset = out_offs;
-		sg_out.length = sz;
-
-		encdecfunc(tfm, &sg_out, &sg_in, sz);
-
-		size -= sz;
-		in_offs += sz;
-		out_offs += sz;
-	}
-
-	return 0;
-}
-
-typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
-			struct scatterlist *sg_out,
-			struct scatterlist *sg_in,
-			unsigned int nsg, u8 *iv);
-
 static int
-cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
+cryptoloop_transfer(struct loop_device *lo, int cmd,
 		    struct page *raw_page, unsigned raw_off,
 		    struct page *loop_page, unsigned loop_off,
 		    int size, sector_t IV)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
+	struct blkcipher_desc desc = {
+		.tfm = tfm,
+		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+	};
 	struct scatterlist sg_out = { NULL, };
 	struct scatterlist sg_in = { NULL, };
 
 	encdec_cbc_t encdecfunc;
 	struct page *in_page, *out_page;
 	unsigned in_offs, out_offs;
+	int err;
 
 	if (cmd == READ) {
 		in_page = raw_page;
 		in_offs = raw_off;
 		out_page = loop_page;
 		out_offs = loop_off;
-		encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
+		encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
 	} else {
 		in_page = loop_page;
 		in_offs = loop_off;
 		out_page = raw_page;
 		out_offs = raw_off;
-		encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
+		encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
 	}
 
 	while (size > 0) {
@@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
 		sg_out.offset = out_offs;
 		sg_out.length = sz;
 
-		encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
+		desc.info = iv;
+		err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+		if (err)
+			return err;
 
 		IV++;
 		size -= sz;
@@ -195,32 +169,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
 }
 
 static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
-		    struct page *raw_page, unsigned raw_off,
-		    struct page *loop_page, unsigned loop_off,
-		    int size, sector_t IV)
-{
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
-	if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB)
-	{
-		lo->transfer = cryptoloop_transfer_ecb;
-		return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
-					       loop_page, loop_off, size, IV);
-	}
-	if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC)
-	{
-		lo->transfer = cryptoloop_transfer_cbc;
-		return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
-					       loop_page, loop_off, size, IV);
-	}
-
-	/* This is not supposed to happen */
-
-	printk(KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
-	return -EINVAL;
-}
-
-static int
 cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 {
 	return -EINVAL;
@@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
 	if (tfm != NULL) {
-		crypto_free_tfm(tfm);
+		crypto_free_blkcipher(tfm);
 		lo->key_data = NULL;
 		return 0;
 	}
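A hedged sketch of the rewrite in the hunks above: the old code parsed
"cipher-mode" and selected a CRYPTO_TFM_MODE_* flag, while the new code
rewrites the string into the template syntax "mode(cipher)" that
crypto_alloc_blkcipher() expects, e.g. "aes-cbc" -> "cbc(aes)" and a bare
"aes" -> "cbc(aes)". The helper below is hypothetical and only restates
that logic; the extra 3 bytes in the length check cover '(', ')' and NUL:

	/* "aes-cbc" -> "cbc(aes)"; defaults to cbc when no mode is given */
	static int build_blkcipher_name(char *dst, size_t len, const char *name)
	{
		size_t cipher_len = strcspn(name, "-");
		const char *mode = name[cipher_len] ? name + cipher_len + 1 : "";
		size_t mode_len = strcspn(mode, "-");

		if (!mode_len) {
			mode = "cbc";
			mode_len = 3;
		}
		if (cipher_len + mode_len + 3 > len)
			return -EINVAL;
		sprintf(dst, "%.*s(%.*s)", (int)mode_len, mode,
			(int)cipher_len, name);
		return 0;
	}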
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4263935443cc..adb554153f67 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,22 +2,53 @@ menu "Hardware crypto devices"
 
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
-	depends on CRYPTO && X86_32
+	depends on X86_32
+	select CRYPTO_ALGAPI
+	default m
 	help
 	  Some VIA processors come with an integrated crypto engine
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
-	  that provides instructions for very fast {en,de}cryption
-	  with some algorithms.
+	  that provides instructions for very fast cryptographic
+	  operations with supported algorithms.
 
 	  The instructions are used only when the CPU supports them.
-	  Otherwise software encryption is used. If you are unsure,
-	  say Y.
+	  Otherwise software encryption is used.
+
+	  Selecting M for this option will compile a helper module
+	  padlock.ko that should autoload all below configured
+	  algorithms. Don't worry if your hardware does not support
+	  some or all of them. In such case padlock.ko will
+	  simply write a single line into the kernel log informing
+	  about its failure but everything will keep working fine.
+
+	  If you are unsure, say M. The compiled module will be
+	  called padlock.ko
 
 config CRYPTO_DEV_PADLOCK_AES
-	bool "Support for AES in VIA PadLock"
+	tristate "PadLock driver for AES algorithm"
 	depends on CRYPTO_DEV_PADLOCK
-	default y
+	select CRYPTO_BLKCIPHER
+	default m
 	help
 	  Use VIA PadLock for AES algorithm.
 
+	  Available in VIA C3 and newer CPUs.
+
+	  If unsure say M. The compiled module will be
+	  called padlock-aes.ko
+
+config CRYPTO_DEV_PADLOCK_SHA
+	tristate "PadLock driver for SHA1 and SHA256 algorithms"
+	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	default m
+	help
+	  Use VIA PadLock for SHA1/SHA256 algorithms.
+
+	  Available in VIA C7 and newer processors.
+
+	  If unsure say M. The compiled module will be
+	  called padlock-sha.ko
+
 endmenu
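For reference, keeping all three options at their "m" defaults yields the
configuration fragment below and builds padlock.ko, padlock-aes.ko and
padlock-sha.ko, as described in the help texts:

	CONFIG_CRYPTO_DEV_PADLOCK=m
	CONFIG_CRYPTO_DEV_PADLOCK_AES=m
	CONFIG_CRYPTO_DEV_PADLOCK_SHA=m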
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 45426ca19a23..4c3d0ec1cf80 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,7 +1,3 @@
-
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
-
-padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
-
-padlock-objs := padlock-generic.o $(padlock-objs-y)
-
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b643d71298a9..d4501dc7e650 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
  * ---------------------------------------------------------------------------
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
@@ -59,6 +59,17 @@
 #define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
 #define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
 
+/* Control word. */
+struct cword {
+	unsigned int __attribute__ ((__packed__))
+		rounds:4,
+		algo:3,
+		keygen:1,
+		interm:1,
+		encdec:1,
+		ksize:2;
+} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
 /* Whenever making any changes to the following
  * structure *make sure* you keep E, d_data
  * and cword aligned on 16 Bytes boundaries!!! */
@@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
 	return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
 {
-	unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+	unsigned long addr = (unsigned long)ctx;
 	unsigned long align = PADLOCK_ALIGNMENT;
 
 	if (align <= crypto_tfm_ctx_alignment())
@@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
 	return (struct aes_ctx *)ALIGN(addr, align);
 }
 
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+	return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	uint32_t i, t, u, v, w;
 	uint32_t P[AES_EXTENDED_KEY_SIZE];
 	uint32_t rounds;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
+	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
@@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
 }
 
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg aes_alg = {
+	.cra_name		=	"aes",
+	.cra_driver_name	=	"aes-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey		=	aes_set_key,
+			.cia_encrypt		=	aes_encrypt,
+			.cia_decrypt		=	aes_decrypt,
+		}
+	}
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->E, &ctx->cword.encrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
-{
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	u8 *iv;
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name		=	"ecb(aes)",
+	.cra_driver_name	=	"ecb-aes-padlock",
+	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize		=	AES_MIN_KEY_SIZE,
+			.max_keysize		=	AES_MAX_KEY_SIZE,
+			.setkey			=	aes_set_key,
+			.encrypt		=	ecb_aes_encrypt,
+			.decrypt		=	ecb_aes_decrypt,
+		}
+	}
+};
 
-	iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
-				&ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
-	memcpy(desc->info, iv, AES_BLOCK_SIZE);
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+					    walk.dst.virt.addr, ctx->E,
+					    walk.iv, &ctx->cword.encrypt,
+					    nbytes / AES_BLOCK_SIZE);
+		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
 
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	return err;
 }
 
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, walk.iv, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static struct crypto_alg aes_alg = {
-	.cra_name		=	"aes",
-	.cra_driver_name	=	"aes-padlock",
-	.cra_priority		=	300,
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name		=	"cbc(aes)",
+	.cra_driver_name	=	"cbc-aes-padlock",
+	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct aes_ctx),
 	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
 	.cra_u			=	{
-		.cipher = {
-			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
-			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
-			.cia_setkey		=	aes_set_key,
-			.cia_encrypt		=	aes_encrypt,
-			.cia_decrypt		=	aes_decrypt,
-			.cia_encrypt_ecb	=	aes_encrypt_ecb,
-			.cia_decrypt_ecb	=	aes_decrypt_ecb,
-			.cia_encrypt_cbc	=	aes_encrypt_cbc,
-			.cia_decrypt_cbc	=	aes_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize		=	AES_MIN_KEY_SIZE,
+			.max_keysize		=	AES_MAX_KEY_SIZE,
+			.ivsize			=	AES_BLOCK_SIZE,
+			.setkey			=	aes_set_key,
+			.encrypt		=	cbc_aes_encrypt,
+			.decrypt		=	cbc_aes_decrypt,
 		}
 	}
 };
 
-int __init padlock_init_aes(void)
+static int __init padlock_init(void)
 {
-	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+	int ret;
+
+	if (!cpu_has_xcrypt) {
+		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+		return -ENODEV;
+	}
+
+	if (!cpu_has_xcrypt_enabled) {
+		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		return -ENODEV;
+	}
 
 	gen_tabs();
-	return crypto_register_alg(&aes_alg);
+	if ((ret = crypto_register_alg(&aes_alg)))
+		goto aes_err;
+
+	if ((ret = crypto_register_alg(&ecb_aes_alg)))
+		goto ecb_aes_err;
+
+	if ((ret = crypto_register_alg(&cbc_aes_alg)))
+		goto cbc_aes_err;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+	goto out;
 }
 
-void __exit padlock_fini_aes(void)
+static void __exit padlock_fini(void)
 {
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("aes-padlock");
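A note on the conversion above: every ecb/cbc helper follows the same
blkcipher_walk pattern from the new blkcipher API. A distilled skeleton
(names as in the hunk; crypt_chunk() is a hypothetical stand-in for the
padlock_xcrypt_* call):

	static int blk_crypt_template(struct blkcipher_desc *desc,
				      struct scatterlist *dst,
				      struct scatterlist *src,
				      unsigned int nbytes)
	{
		struct blkcipher_walk walk;
		int err;

		blkcipher_walk_init(&walk, dst, src, nbytes);
		err = blkcipher_walk_virt(desc, &walk);	/* map first chunk */

		while ((nbytes = walk.nbytes)) {
			/* process all whole blocks in this chunk */
			crypt_chunk(walk.src.virt.addr, walk.dst.virt.addr,
				    nbytes / AES_BLOCK_SIZE);
			/* hand any sub-block remainder back to the walker */
			nbytes &= AES_BLOCK_SIZE - 1;
			err = blkcipher_walk_done(desc, &walk, nbytes);
		}

		return err;
	}

blkcipher_walk_done() advances to the next scatterlist chunk, so the loop
ends once walk.nbytes reaches zero.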
diff --git a/drivers/crypto/padlock-generic.c b/drivers/crypto/padlock-generic.c
deleted file mode 100644
index 18cf0e8274a7..000000000000
--- a/drivers/crypto/padlock-generic.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for VIA PadLock hardware crypto engine.
5 *
6 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/crypto.h>
19#include <asm/byteorder.h>
20#include "padlock.h"
21
22static int __init
23padlock_init(void)
24{
25 int ret = -ENOSYS;
26
27 if (!cpu_has_xcrypt) {
28 printk(KERN_ERR PFX "VIA PadLock not detected.\n");
29 return -ENODEV;
30 }
31
32 if (!cpu_has_xcrypt_enabled) {
33 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
34 return -ENODEV;
35 }
36
37#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
38 if ((ret = padlock_init_aes())) {
39 printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
40 return ret;
41 }
42#endif
43
44 if (ret == -ENOSYS)
45 printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
46
47 return ret;
48}
49
50static void __exit
51padlock_fini(void)
52{
53#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
54 padlock_fini_aes();
55#endif
56}
57
58module_init(padlock_init);
59module_exit(padlock_fini);
60
61MODULE_DESCRIPTION("VIA PadLock crypto engine support.");
62MODULE_LICENSE("Dual BSD/GPL");
63MODULE_AUTHOR("Michal Ludvig");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
new file mode 100644
index 000000000000..a781fd23b607
--- /dev/null
+++ b/drivers/crypto/padlock-sha.c
@@ -0,0 +1,318 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for VIA PadLock hardware crypto engine.
5 *
6 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <crypto/algapi.h>
16#include <linux/err.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/cryptohash.h>
21#include <linux/interrupt.h>
22#include <linux/kernel.h>
23#include <linux/scatterlist.h>
24#include "padlock.h"
25
26#define SHA1_DEFAULT_FALLBACK "sha1-generic"
27#define SHA1_DIGEST_SIZE 20
28#define SHA1_HMAC_BLOCK_SIZE 64
29
30#define SHA256_DEFAULT_FALLBACK "sha256-generic"
31#define SHA256_DIGEST_SIZE 32
32#define SHA256_HMAC_BLOCK_SIZE 64
33
34struct padlock_sha_ctx {
35 char *data;
36 size_t used;
37 int bypass;
38 void (*f_sha_padlock)(const char *in, char *out, int count);
39 struct hash_desc fallback;
40};
41
42static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
43{
44 return crypto_tfm_ctx(tfm);
45}
46
47/* We'll need aligned address on the stack */
48#define NEAREST_ALIGNED(ptr) \
49 ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
50
51static struct crypto_alg sha1_alg, sha256_alg;
52
53static void padlock_sha_bypass(struct crypto_tfm *tfm)
54{
55 if (ctx(tfm)->bypass)
56 return;
57
58 crypto_hash_init(&ctx(tfm)->fallback);
59 if (ctx(tfm)->data && ctx(tfm)->used) {
60 struct scatterlist sg;
61
62 sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
63 crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
64 }
65
66 ctx(tfm)->used = 0;
67 ctx(tfm)->bypass = 1;
68}
69
70static void padlock_sha_init(struct crypto_tfm *tfm)
71{
72 ctx(tfm)->used = 0;
73 ctx(tfm)->bypass = 0;
74}
75
76static void padlock_sha_update(struct crypto_tfm *tfm,
77 const uint8_t *data, unsigned int length)
78{
79 /* Our buffer is always one page. */
80 if (unlikely(!ctx(tfm)->bypass &&
81 (ctx(tfm)->used + length > PAGE_SIZE)))
82 padlock_sha_bypass(tfm);
83
84 if (unlikely(ctx(tfm)->bypass)) {
85 struct scatterlist sg;
86 sg_set_buf(&sg, (uint8_t *)data, length);
87 crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
88 return;
89 }
90
91 memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
92 ctx(tfm)->used += length;
93}
94
95static inline void padlock_output_block(uint32_t *src,
96 uint32_t *dst, size_t count)
97{
98 while (count--)
99 *dst++ = swab32(*src++);
100}
101
102static void padlock_do_sha1(const char *in, char *out, int count)
103{
104 /* We can't store directly to *out as it may be unaligned. */
105 /* BTW Don't reduce the buffer size below 128 Bytes!
106 * PadLock microcode needs it that big. */
107 char buf[128+16];
108 char *result = NEAREST_ALIGNED(buf);
109
110 ((uint32_t *)result)[0] = 0x67452301;
111 ((uint32_t *)result)[1] = 0xEFCDAB89;
112 ((uint32_t *)result)[2] = 0x98BADCFE;
113 ((uint32_t *)result)[3] = 0x10325476;
114 ((uint32_t *)result)[4] = 0xC3D2E1F0;
115
116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
117 : "+S"(in), "+D"(result)
118 : "c"(count), "a"(0));
119
120 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
121}
122
123static void padlock_do_sha256(const char *in, char *out, int count)
124{
125 /* We can't store directly to *out as it may be unaligned. */
126 /* BTW Don't reduce the buffer size below 128 Bytes!
127 * PadLock microcode needs it that big. */
128 char buf[128+16];
129 char *result = NEAREST_ALIGNED(buf);
130
131 ((uint32_t *)result)[0] = 0x6A09E667;
132 ((uint32_t *)result)[1] = 0xBB67AE85;
133 ((uint32_t *)result)[2] = 0x3C6EF372;
134 ((uint32_t *)result)[3] = 0xA54FF53A;
135 ((uint32_t *)result)[4] = 0x510E527F;
136 ((uint32_t *)result)[5] = 0x9B05688C;
137 ((uint32_t *)result)[6] = 0x1F83D9AB;
138 ((uint32_t *)result)[7] = 0x5BE0CD19;
139
140 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
141 : "+S"(in), "+D"(result)
142 : "c"(count), "a"(0));
143
144 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
145}
146
147static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
148{
149 if (unlikely(ctx(tfm)->bypass)) {
150 crypto_hash_final(&ctx(tfm)->fallback, out);
151 ctx(tfm)->bypass = 0;
152 return;
153 }
154
155 /* Pass the input buffer to PadLock microcode... */
156 ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
157
158 ctx(tfm)->used = 0;
159}
160
161static int padlock_cra_init(struct crypto_tfm *tfm)
162{
163 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
164 struct crypto_hash *fallback_tfm;
165
166 /* For now we'll allocate one page. This
167 * could eventually be configurable one day. */
168 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
169 if (!ctx(tfm)->data)
170 return -ENOMEM;
171
172 /* Allocate a fallback and abort if it failed. */
173 fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
174 CRYPTO_ALG_ASYNC |
175 CRYPTO_ALG_NEED_FALLBACK);
176 if (IS_ERR(fallback_tfm)) {
177 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
178 fallback_driver_name);
179 free_page((unsigned long)(ctx(tfm)->data));
180 return PTR_ERR(fallback_tfm);
181 }
182
183 ctx(tfm)->fallback.tfm = fallback_tfm;
184 return 0;
185}
186
187static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
188{
189 ctx(tfm)->f_sha_padlock = padlock_do_sha1;
190
191 return padlock_cra_init(tfm);
192}
193
194static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
195{
196 ctx(tfm)->f_sha_padlock = padlock_do_sha256;
197
198 return padlock_cra_init(tfm);
199}
200
201static void padlock_cra_exit(struct crypto_tfm *tfm)
202{
203 if (ctx(tfm)->data) {
204 free_page((unsigned long)(ctx(tfm)->data));
205 ctx(tfm)->data = NULL;
206 }
207
208 crypto_free_hash(ctx(tfm)->fallback.tfm);
209 ctx(tfm)->fallback.tfm = NULL;
210}
211
212static struct crypto_alg sha1_alg = {
213 .cra_name = "sha1",
214 .cra_driver_name = "sha1-padlock",
215 .cra_priority = PADLOCK_CRA_PRIORITY,
216 .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
217 CRYPTO_ALG_NEED_FALLBACK,
218 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
219 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
220 .cra_module = THIS_MODULE,
221 .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
222 .cra_init = padlock_sha1_cra_init,
223 .cra_exit = padlock_cra_exit,
224 .cra_u = {
225 .digest = {
226 .dia_digestsize = SHA1_DIGEST_SIZE,
227 .dia_init = padlock_sha_init,
228 .dia_update = padlock_sha_update,
229 .dia_final = padlock_sha_final,
230 }
231 }
232};
233
234static struct crypto_alg sha256_alg = {
235 .cra_name = "sha256",
236 .cra_driver_name = "sha256-padlock",
237 .cra_priority = PADLOCK_CRA_PRIORITY,
238 .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
239 CRYPTO_ALG_NEED_FALLBACK,
240 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
241 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
242 .cra_module = THIS_MODULE,
243 .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
244 .cra_init = padlock_sha256_cra_init,
245 .cra_exit = padlock_cra_exit,
246 .cra_u = {
247 .digest = {
248 .dia_digestsize = SHA256_DIGEST_SIZE,
249 .dia_init = padlock_sha_init,
250 .dia_update = padlock_sha_update,
251 .dia_final = padlock_sha_final,
252 }
253 }
254};
255
256static void __init padlock_sha_check_fallbacks(void)
257{
258 if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
259 CRYPTO_ALG_NEED_FALLBACK))
260 printk(KERN_WARNING PFX
261 "Couldn't load fallback module for sha1.\n");
262
263 if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
264 CRYPTO_ALG_NEED_FALLBACK))
265 printk(KERN_WARNING PFX
266 "Couldn't load fallback module for sha256.\n");
267}
268
269static int __init padlock_init(void)
270{
271 int rc = -ENODEV;
272
273 if (!cpu_has_phe) {
274 printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
275 return -ENODEV;
276 }
277
278 if (!cpu_has_phe_enabled) {
279 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
280 return -ENODEV;
281 }
282
283 padlock_sha_check_fallbacks();
284
285 rc = crypto_register_alg(&sha1_alg);
286 if (rc)
287 goto out;
288
289 rc = crypto_register_alg(&sha256_alg);
290 if (rc)
291 goto out_unreg1;
292
293 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
294
295 return 0;
296
297out_unreg1:
298 crypto_unregister_alg(&sha1_alg);
299out:
300 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
301 return rc;
302}
303
304static void __exit padlock_fini(void)
305{
306 crypto_unregister_alg(&sha1_alg);
307 crypto_unregister_alg(&sha256_alg);
308}
309
310module_init(padlock_init);
311module_exit(padlock_fini);
312
313MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
314MODULE_LICENSE("GPL");
315MODULE_AUTHOR("Michal Ludvig");
316
317MODULE_ALIAS("sha1-padlock");
318MODULE_ALIAS("sha256-padlock");
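
A minimal standalone sketch of the alignment trick the listing above relies on: the PadLock "rep xsha1" instruction needs a 16-byte-aligned, 128-byte state area seeded with the standard SHA-1 initial values. The wrapper below mirrors the driver's padlock_do_sha1() but is our own illustration (it assumes a PadLock-capable VIA CPU and an x86 compiler with GNU inline asm); it is not part of the patch.

	#include <stdint.h>
	#include <string.h>

	#define PADLOCK_ALIGNMENT 16
	#define NEAREST_ALIGNED(ptr) \
		((void *)(((uintptr_t)(ptr) + PADLOCK_ALIGNMENT - 1) & \
			  ~(uintptr_t)(PADLOCK_ALIGNMENT - 1)))

	static void padlock_sha1_sketch(const char *in, char *out, int count)
	{
		/* The microcode scribbles on a 128-byte state area, hence
		 * the over-sized stack buffer plus manual alignment. */
		char buf[128 + 16];
		uint32_t *result = NEAREST_ALIGNED(buf);

		/* Standard SHA-1 initial state; with EAX = 0 the insn
		 * pads and finalizes the whole message in one shot. */
		result[0] = 0x67452301;
		result[1] = 0xEFCDAB89;
		result[2] = 0x98BADCFE;
		result[3] = 0x10325476;
		result[4] = 0xC3D2E1F0;

		asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"	/* rep xsha1 */
			      : "+S" (in), "+D" (result)
			      : "c" (count), "a" (0));

		/* *out may be unaligned, so copy the digest out. */
		memcpy(out, result, 5 * sizeof(uint32_t));
	}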
diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c
new file mode 100644
index 000000000000..d6d7dd5bb98c
--- /dev/null
+++ b/drivers/crypto/padlock.c
@@ -0,0 +1,58 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for VIA PadLock hardware crypto engine.
5 *
6 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/crypto.h>
19#include <linux/cryptohash.h>
20#include <linux/interrupt.h>
21#include <linux/kernel.h>
22#include <linux/scatterlist.h>
23#include "padlock.h"
24
25static int __init padlock_init(void)
26{
27 int success = 0;
28
29 if (crypto_has_cipher("aes-padlock", 0, 0))
30 success++;
31
32 if (crypto_has_hash("sha1-padlock", 0, 0))
33 success++;
34
35 if (crypto_has_hash("sha256-padlock", 0, 0))
36 success++;
37
38 if (!success) {
39 printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
40 return -ENODEV;
41 }
42
43 printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
44
45 return 0;
46}
47
48static void __exit padlock_fini(void)
49{
50}
51
52module_init(padlock_init);
53module_exit(padlock_fini);
54
55MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
56MODULE_LICENSE("GPL");
57MODULE_AUTHOR("Michal Ludvig");
58
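
Because sha1-padlock and sha256-padlock register with cra_priority 300, callers never name them explicitly: a plain allocation of "sha1" transparently selects the PadLock implementation when the engine is present, and the generic one otherwise. A hypothetical in-kernel user would look like the sketch below (names other than the API calls are ours).

	#include <linux/err.h>
	#include <linux/crypto.h>

	static int sha1_user_sketch(void)
	{
		struct crypto_hash *tfm;

		/* Picks the highest-priority synchronous "sha1" provider. */
		tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* crypto_hash_digestsize(tfm) == 20 whatever the backend. */
		crypto_free_hash(tfm);
		return 0;
	}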
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index b78489bc298a..b728e4518bd1 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -15,22 +15,9 @@
 
 #define PADLOCK_ALIGNMENT 16
 
-/* Control word. */
-struct cword {
-	unsigned int __attribute__ ((__packed__))
-		rounds:4,
-		algo:3,
-		keygen:1,
-		interm:1,
-		encdec:1,
-		ksize:2;
-} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
-
 #define PFX	"padlock: "
 
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
-int padlock_init_aes(void);
-void padlock_fini_aes(void);
-#endif
+#define PADLOCK_CRA_PRIORITY	300
+#define PADLOCK_COMPOSITE_PRIORITY	400
 
 #endif	/* _CRYPTO_PADLOCK_H */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6022ed12a795..bdbd34993a80 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -5,6 +5,7 @@
  * This file is released under the GPL.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -78,11 +79,13 @@ struct crypt_config {
 	 */
 	struct crypt_iv_operations *iv_gen_ops;
 	char *iv_mode;
-	void *iv_gen_private;
+	struct crypto_cipher *iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	struct crypto_tfm *tfm;
+	char cipher[CRYPTO_MAX_ALG_NAME];
+	char chainmode[CRYPTO_MAX_ALG_NAME];
+	struct crypto_blkcipher *tfm;
 	unsigned int key_size;
 	u8 key[0];
 };
@@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool;
 /*
  * Different IV generation algorithms:
  *
- * plain: the initial vector is the 32-bit low-endian version of the sector
+ * plain: the initial vector is the 32-bit little-endian version of the sector
  *        number, padded with zeros if neccessary.
  *
- * ess_iv: "encrypted sector|salt initial vector", the sector number is
+ * essiv: "encrypted sector|salt initial vector", the sector number is
  *        encrypted with the bulk cipher using a salt as key. The salt
  *        should be derived from the bulk cipher's key via hashing.
  *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
@@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	struct crypto_tfm *essiv_tfm;
-	struct crypto_tfm *hash_tfm;
+	struct crypto_cipher *essiv_tfm;
+	struct crypto_hash *hash_tfm;
+	struct hash_desc desc;
 	struct scatterlist sg;
 	unsigned int saltsize;
 	u8 *salt;
+	int err;
 
 	if (opts == NULL) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	}
 
 	/* Hash the cipher key with the given hash algorithm */
-	hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (hash_tfm == NULL) {
+	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hash_tfm)) {
 		ti->error = "Error initializing ESSIV hash";
-		return -EINVAL;
+		return PTR_ERR(hash_tfm);
 	}
 
-	if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
-		ti->error = "Expected digest algorithm for ESSIV hash";
-		crypto_free_tfm(hash_tfm);
-		return -EINVAL;
-	}
-
-	saltsize = crypto_tfm_alg_digestsize(hash_tfm);
+	saltsize = crypto_hash_digestsize(hash_tfm);
 	salt = kmalloc(saltsize, GFP_KERNEL);
 	if (salt == NULL) {
 		ti->error = "Error kmallocing salt storage in ESSIV";
-		crypto_free_tfm(hash_tfm);
+		crypto_free_hash(hash_tfm);
 		return -ENOMEM;
 	}
 
 	sg_set_buf(&sg, cc->key, cc->key_size);
-	crypto_digest_digest(hash_tfm, &sg, 1, salt);
-	crypto_free_tfm(hash_tfm);
+	desc.tfm = hash_tfm;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
+	crypto_free_hash(hash_tfm);
+
+	if (err) {
+		ti->error = "Error calculating hash in ESSIV";
+		return err;
+	}
 
 	/* Setup the essiv_tfm with the given salt */
-	essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
-				     CRYPTO_TFM_MODE_ECB |
-				     CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (essiv_tfm == NULL) {
+	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(essiv_tfm)) {
 		ti->error = "Error allocating crypto tfm for ESSIV";
 		kfree(salt);
-		return -EINVAL;
+		return PTR_ERR(essiv_tfm);
 	}
-	if (crypto_tfm_alg_blocksize(essiv_tfm)
-	    != crypto_tfm_alg_ivsize(cc->tfm)) {
+	if (crypto_cipher_blocksize(essiv_tfm) !=
+	    crypto_blkcipher_ivsize(cc->tfm)) {
 		ti->error = "Block size of ESSIV cipher does "
 			    "not match IV size of block cipher";
-		crypto_free_tfm(essiv_tfm);
+		crypto_free_cipher(essiv_tfm);
 		kfree(salt);
 		return -EINVAL;
 	}
-	if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
+	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+	if (err) {
 		ti->error = "Failed to set key for ESSIV cipher";
-		crypto_free_tfm(essiv_tfm);
+		crypto_free_cipher(essiv_tfm);
 		kfree(salt);
-		return -EINVAL;
+		return err;
 	}
 	kfree(salt);
 
-	cc->iv_gen_private = (void *)essiv_tfm;
+	cc->iv_gen_private = essiv_tfm;
 	return 0;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-	crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private);
+	crypto_free_cipher(cc->iv_gen_private);
 	cc->iv_gen_private = NULL;
 }
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
-	struct scatterlist sg;
-
 	memset(iv, 0, cc->iv_size);
 	*(u64 *)iv = cpu_to_le64(sector);
-
-	sg_set_buf(&sg, iv, cc->iv_size);
-	crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private,
-			      &sg, &sg, cc->iv_size);
-
+	crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
 	return 0;
 }
 
@@ -220,6 +219,11 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
 			  int write, sector_t sector)
 {
 	u8 iv[cc->iv_size];
+	struct blkcipher_desc desc = {
+		.tfm = cc->tfm,
+		.info = iv,
+		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+	};
 	int r;
 
 	if (cc->iv_gen_ops) {
@@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
 			return r;
 
 		if (write)
-			r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
+			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
 		else
-			r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
+			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
 	} else {
 		if (write)
-			r = crypto_cipher_encrypt(cc->tfm, out, in, length);
+			r = crypto_blkcipher_encrypt(&desc, out, in, length);
 		else
-			r = crypto_cipher_decrypt(cc->tfm, out, in, length);
+			r = crypto_blkcipher_decrypt(&desc, out, in, length);
 	}
 
 	return r;
@@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct crypt_config *cc;
-	struct crypto_tfm *tfm;
+	struct crypto_blkcipher *tfm;
 	char *tmp;
 	char *cipher;
 	char *chainmode;
 	char *ivmode;
 	char *ivopts;
-	unsigned int crypto_flags;
 	unsigned int key_size;
 	unsigned long long tmpll;
 
@@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ivmode = "plain";
 	}
 
-	/* Choose crypto_flags according to chainmode */
-	if (strcmp(chainmode, "cbc") == 0)
-		crypto_flags = CRYPTO_TFM_MODE_CBC;
-	else if (strcmp(chainmode, "ecb") == 0)
-		crypto_flags = CRYPTO_TFM_MODE_ECB;
-	else {
-		ti->error = "Unknown chaining mode";
+	if (strcmp(chainmode, "ecb") && !ivmode) {
+		ti->error = "This chaining mode requires an IV mechanism";
 		goto bad1;
 	}
 
-	if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
-		ti->error = "This chaining mode requires an IV mechanism";
+	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
+		     cipher) >= CRYPTO_MAX_ALG_NAME) {
+		ti->error = "Chain mode + cipher name is too long";
 		goto bad1;
 	}
 
-	tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (!tfm) {
+	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
 		ti->error = "Error allocating crypto tfm";
 		goto bad1;
 	}
-	if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
-		ti->error = "Expected cipher algorithm";
-		goto bad2;
-	}
 
+	strcpy(cc->cipher, cipher);
+	strcpy(cc->chainmode, chainmode);
 	cc->tfm = tfm;
 
 	/*
@@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
 		goto bad2;
 
-	if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
+	cc->iv_size = crypto_blkcipher_ivsize(tfm);
+	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
-		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
+		cc->iv_size = max(cc->iv_size,
 				  (unsigned int)(sizeof(u64) / sizeof(u8)));
 	else {
-		cc->iv_size = 0;
 		if (cc->iv_gen_ops) {
 			DMWARN("Selected cipher does not support IVs");
 			if (cc->iv_gen_ops->dtr)
@@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad4;
 	}
 
-	if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
+	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
 		ti->error = "Error setting key";
 		goto bad5;
 	}
@@ -675,7 +672,7 @@ bad3:
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 bad2:
-	crypto_free_tfm(tfm);
+	crypto_free_blkcipher(tfm);
 bad1:
 	/* Must zero key material before freeing */
 	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
@@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti)
 	kfree(cc->iv_mode);
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
-	crypto_free_tfm(cc->tfm);
+	crypto_free_blkcipher(cc->tfm);
 	dm_put_device(ti, cc->dev);
 
 	/* Must zero key material before freeing */
@@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
 		break;
 
 	case STATUSTYPE_TABLE:
-		cipher = crypto_tfm_alg_name(cc->tfm);
+		cipher = crypto_blkcipher_name(cc->tfm);
 
-		switch(cc->tfm->crt_cipher.cit_mode) {
-		case CRYPTO_TFM_MODE_CBC:
-			chainmode = "cbc";
-			break;
-		case CRYPTO_TFM_MODE_ECB:
-			chainmode = "ecb";
-			break;
-		default:
-			BUG();
-		}
+		chainmode = cc->chainmode;
 
 		if (cc->iv_mode)
 			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
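
A hedged sketch of the complete ESSIV pipeline the converted code implements: salt = Hash(key), the ESSIV cipher is keyed with the salt, and each sector's IV is E_salt(sector). The function names are the ones used by the patch; the standalone wrapper, its error handling (elided) and the fixed salt buffer size are our illustration.

	static int essiv_pipeline_sketch(struct crypto_hash *hash,
					 struct crypto_cipher *essiv_tfm,
					 const u8 *key, unsigned int key_size,
					 u8 *iv, unsigned int iv_size,
					 sector_t sector)
	{
		struct hash_desc desc = {
			.tfm = hash,
			.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
		};
		struct scatterlist sg;
		u8 salt[64];	/* assumed large enough for the digest */

		sg_set_buf(&sg, key, key_size);
		crypto_hash_digest(&desc, &sg, key_size, salt);	/* salt = H(key) */
		crypto_cipher_setkey(essiv_tfm, salt,
				     crypto_hash_digestsize(hash));

		memset(iv, 0, iv_size);
		*(u64 *)iv = cpu_to_le64(sector);	/* little-endian sector */
		crypto_cipher_encrypt_one(essiv_tfm, iv, iv);	/* IV = E_salt(n) */
		return 0;
	}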
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 51ff9a9d1bb5..f3655fd772f5 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -43,6 +43,7 @@
  * deprecated in 2.6
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/version.h>
@@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
 MODULE_VERSION("1.0.2");
 
-static void
+static unsigned int
 setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
 {
 	sg[0].page = virt_to_page(address);
 	sg[0].offset = offset_in_page(address);
 	sg[0].length = length;
+	return length;
 }
 
 #define SHA1_PAD_SIZE 40
@@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
  * State for an MPPE (de)compressor.
  */
 struct ppp_mppe_state {
-	struct crypto_tfm *arc4;
-	struct crypto_tfm *sha1;
+	struct crypto_blkcipher *arc4;
+	struct crypto_hash *sha1;
 	unsigned char *sha1_digest;
 	unsigned char master_key[MPPE_MAX_KEY_LEN];
 	unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -136,14 +138,21 @@ struct ppp_mppe_state {
  */
 static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
 {
+	struct hash_desc desc;
 	struct scatterlist sg[4];
+	unsigned int nbytes;
 
-	setup_sg(&sg[0], state->master_key, state->keylen);
-	setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1));
-	setup_sg(&sg[2], state->session_key, state->keylen);
-	setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2));
+	nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
+	nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
+			   sizeof(sha_pad->sha_pad1));
+	nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
+	nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
+			   sizeof(sha_pad->sha_pad2));
 
-	crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest);
+	desc.tfm = state->sha1;
+	desc.flags = 0;
+
+	crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
 
 	memcpy(InterimKey, state->sha1_digest, state->keylen);
 }
@@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 {
 	unsigned char InterimKey[MPPE_MAX_KEY_LEN];
 	struct scatterlist sg_in[1], sg_out[1];
+	struct blkcipher_desc desc = { .tfm = state->arc4 };
 
 	get_new_key_from_sha(state, InterimKey);
 	if (!initial_key) {
-		crypto_cipher_setkey(state->arc4, InterimKey, state->keylen);
+		crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
 		setup_sg(sg_in, InterimKey, state->keylen);
 		setup_sg(sg_out, state->session_key, state->keylen);
-		if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in,
-					  state->keylen) != 0) {
+		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+					     state->keylen) != 0) {
			printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
 		}
 	} else {
@@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 		state->session_key[1] = 0x26;
 		state->session_key[2] = 0x9e;
 	}
-	crypto_cipher_setkey(state->arc4, state->session_key, state->keylen);
+	crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
 }
 
 /*
@@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 
 	memset(state, 0, sizeof(*state));
 
-	state->arc4 = crypto_alloc_tfm("arc4", 0);
-	if (!state->arc4)
+	state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(state->arc4)) {
+		state->arc4 = NULL;
 		goto out_free;
+	}
 
-	state->sha1 = crypto_alloc_tfm("sha1", 0);
-	if (!state->sha1)
+	state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(state->sha1)) {
+		state->sha1 = NULL;
 		goto out_free;
+	}
 
-	digestsize = crypto_tfm_alg_digestsize(state->sha1);
+	digestsize = crypto_hash_digestsize(state->sha1);
 	if (digestsize < MPPE_MAX_KEY_LEN)
 		goto out_free;
 
@@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 	if (state->sha1_digest)
 		kfree(state->sha1_digest);
 	if (state->sha1)
-		crypto_free_tfm(state->sha1);
+		crypto_free_hash(state->sha1);
 	if (state->arc4)
-		crypto_free_tfm(state->arc4);
+		crypto_free_blkcipher(state->arc4);
 	kfree(state);
       out:
 	return NULL;
@@ -247,9 +261,9 @@ static void mppe_free(void *arg)
 		if (state->sha1_digest)
 			kfree(state->sha1_digest);
 		if (state->sha1)
-			crypto_free_tfm(state->sha1);
+			crypto_free_hash(state->sha1);
 		if (state->arc4)
-			crypto_free_tfm(state->arc4);
+			crypto_free_blkcipher(state->arc4);
 		kfree(state);
 	}
 }
@@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
 	      int isize, int osize)
 {
 	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	struct blkcipher_desc desc = { .tfm = state->arc4 };
 	int proto;
 	struct scatterlist sg_in[1], sg_out[1];
 
@@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
 	/* Encrypt packet */
 	setup_sg(sg_in, ibuf, isize);
 	setup_sg(sg_out, obuf, osize);
-	if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) {
+	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
 		printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
 		return -1;
 	}
@@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 		int osize)
 {
 	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	struct blkcipher_desc desc = { .tfm = state->arc4 };
 	unsigned ccount;
 	int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
 	int sanity = 0;
@@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 	 */
 	setup_sg(sg_in, ibuf, 1);
 	setup_sg(sg_out, obuf, 1);
-	if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) {
+	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
 		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
 		return DECOMP_ERROR;
 	}
@@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 	/* And finally, decrypt the rest of the packet. */
 	setup_sg(sg_in, ibuf + 1, isize - 1);
 	setup_sg(sg_out, obuf + 1, osize - 1);
-	if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) {
+	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
 		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
 		return DECOMP_ERROR;
 	}
@@ -694,8 +710,8 @@ static struct compressor ppp_mppe = {
 static int __init ppp_mppe_init(void)
 {
 	int answer;
-	if (!(crypto_alg_available("arc4", 0) &&
-	      crypto_alg_available("sha1", 0)))
+	if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+	      crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
 		return -ENODEV;
 
 	sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
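
A minimal sketch of the hash calling convention MPPE now follows: a hash_desc pairs the tfm with per-call flags, and the length argument is the total byte count covered by the scatterlist entries (which is why setup_sg() above was changed to return the length). The wrapper name is ours.

	static int sha1_digest_sketch(struct crypto_hash *sha1,
				      struct scatterlist *sg,
				      unsigned int nbytes, u8 *out)
	{
		/* desc lives on the stack; the tfm itself stays stateless
		 * across one-shot digest calls. */
		struct hash_desc desc = { .tfm = sha1, .flags = 0 };

		return crypto_hash_digest(&desc, sg, nbytes, out);
	}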
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4dd13942714..170c500169da 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -19,6 +19,7 @@
 
 ======================================================================*/
 
+#include <linux/err.h>
 #include <linux/init.h>
 
 #include <linux/kernel.h>
@@ -1203,7 +1204,7 @@ struct airo_info {
 	struct iw_spy_data	spy_data;
 	struct iw_public_data	wireless_data;
 	/* MIC stuff */
-	struct crypto_tfm	*tfm;
+	struct crypto_cipher	*tfm;
 	mic_module		mod[2];
 	mic_statistics		micstats;
 	HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
@@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
 
 static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
 static void MoveWindow(miccntx *context, u32 micSeq);
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
+static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
+			   struct crypto_cipher *tfm);
 static void emmh32_init(emmh32_context *context);
 static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
 static void emmh32_final(emmh32_context *context, u8 digest[4]);
@@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) {
 	int i;
 
 	if (ai->tfm == NULL)
-		ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);
+		ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
 
-	if (ai->tfm == NULL) {
+	if (IS_ERR(ai->tfm)) {
 		airo_print_err(ai->dev->name, "failed to load transform for AES");
+		ai->tfm = NULL;
 		return ERROR;
 	}
 
@@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq)
 static unsigned char aes_counter[16];
 
 /* expand the key to fill the MMH coefficient array */
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
+static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
+			   struct crypto_cipher *tfm)
 {
   /* take the keying material, expand if necessary, truncate at 16-bytes */
   /* run through AES counter mode to generate context->coeff[] */
@@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
 	int i,j;
 	u32 counter;
 	u8 *cipher, plain[16];
-	struct scatterlist sg[1];
 
 	crypto_cipher_setkey(tfm, pkey, 16);
 	counter = 0;
@@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
 		aes_counter[12] = (u8)(counter >> 24);
 		counter++;
 		memcpy (plain, aes_counter, 16);
-		sg_set_buf(sg, plain, 16);
-		crypto_cipher_encrypt(tfm, sg, sg, 16);
-		cipher = kmap(sg->page) + sg->offset;
+		crypto_cipher_encrypt_one(tfm, plain, plain);
+		cipher = plain;
 		for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
 			context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
 			j += 4;
@@ -2432,7 +2434,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
 				    ai->shared, ai->shared_dma);
 		}
 	}
-	crypto_free_tfm(ai->tfm);
+	crypto_free_cipher(ai->tfm);
 	del_airo_dev( dev );
 	free_netdev( dev );
 }
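
A sketch of the single-block counter-mode expansion emmh32_setseed() performs after the conversion: crypto_cipher_encrypt_one() operates on one 16-byte block in ordinary memory, so the old scatterlist and kmap() plumbing disappears. The standalone helper and the exact counter byte layout (mirroring the aes_counter[12..15] stores in the driver) are our illustration.

	static void aes_ctr_block_sketch(struct crypto_cipher *tfm,
					 u8 ctr_block[16], u32 counter,
					 u8 out[16])
	{
		/* Big-endian counter in the last four bytes, as airo does. */
		ctr_block[15] = (u8)(counter >> 0);
		ctr_block[14] = (u8)(counter >> 8);
		ctr_block[13] = (u8)(counter >> 16);
		ctr_block[12] = (u8)(counter >> 24);
		crypto_cipher_encrypt_one(tfm, out, ctr_block);
	}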
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 058f094f945a..66a1ae1d6982 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -26,6 +26,7 @@
  * Zhenyu Wang
  */
 
+#include <linux/err.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/inet.h>
@@ -107,8 +108,11 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
 		u8* crc)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct hash_desc desc;
 
-	crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc);
+	desc.tfm = tcp_conn->tx_tfm;
+	desc.flags = 0;
+	crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc);
 	buf->sg.length += sizeof(uint32_t);
 }
 
@@ -452,11 +456,14 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
 	}
 
 	if (conn->hdrdgst_en) {
+		struct hash_desc desc;
 		struct scatterlist sg;
 
 		sg_init_one(&sg, (u8 *)hdr,
 			    sizeof(struct iscsi_hdr) + ahslen);
-		crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
+		desc.tfm = tcp_conn->rx_tfm;
+		desc.flags = 0;
+		crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst);
 		rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
 				     ahslen);
 		if (cdgst != rdgst) {
@@ -673,7 +680,7 @@ partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn,
 	memcpy(&temp, sg, sizeof(struct scatterlist));
 	temp.offset = offset;
 	temp.length = length;
-	crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1);
+	crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length);
 }
 
 static void
@@ -682,7 +689,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
 	struct scatterlist tmp;
 
 	sg_init_one(&tmp, buf, len);
-	crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1);
+	crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len);
 }
 
 static int iscsi_scsi_data_in(struct iscsi_conn *conn)
@@ -736,9 +743,9 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
 		if (!rc) {
 			if (conn->datadgst_en) {
 				if (!offset)
-					crypto_digest_update(
-						tcp_conn->data_rx_tfm,
-						&sg[i], 1);
+					crypto_hash_update(
+						&tcp_conn->data_rx_hash,
+						&sg[i], sg[i].length);
 				else
 					partial_sg_digest_update(tcp_conn,
 								 &sg[i],
@@ -877,8 +884,7 @@ more:
 		rc = iscsi_tcp_hdr_recv(conn);
 		if (!rc && tcp_conn->in.datalen) {
 			if (conn->datadgst_en) {
-				BUG_ON(!tcp_conn->data_rx_tfm);
-				crypto_digest_init(tcp_conn->data_rx_tfm);
+				crypto_hash_init(&tcp_conn->data_rx_hash);
 			}
 			tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
 		} else if (rc) {
@@ -931,11 +937,11 @@ more:
 					  tcp_conn->in.padding);
 			memset(pad, 0, tcp_conn->in.padding);
 			sg_init_one(&sg, pad, tcp_conn->in.padding);
-			crypto_digest_update(tcp_conn->data_rx_tfm,
-					     &sg, 1);
+			crypto_hash_update(&tcp_conn->data_rx_hash,
+					   &sg, sg.length);
 		}
-		crypto_digest_final(tcp_conn->data_rx_tfm,
-				    (u8 *) & tcp_conn->in.datadgst);
+		crypto_hash_final(&tcp_conn->data_rx_hash,
+				  (u8 *)&tcp_conn->in.datadgst);
 		debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
 		tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
 	} else
@@ -1181,8 +1187,7 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
 {
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 
-	BUG_ON(!tcp_conn->data_tx_tfm);
-	crypto_digest_init(tcp_conn->data_tx_tfm);
+	crypto_hash_init(&tcp_conn->data_tx_hash);
 	tcp_ctask->digest_count = 4;
 }
 
@@ -1196,7 +1201,7 @@ iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 	int sent = 0;
 
 	if (final)
-		crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest);
+		crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest);
 
 	iscsi_buf_init_iov(buf, (char*)digest, 4);
 	rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
@@ -1491,16 +1496,17 @@ handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 		if (rc) {
 			tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
 			if (conn->datadgst_en) {
-				crypto_digest_final(tcp_conn->data_tx_tfm,
-						    (u8*)&tcp_ctask->immdigest);
+				crypto_hash_final(&tcp_conn->data_tx_hash,
+						  (u8 *)&tcp_ctask->immdigest);
 				debug_tcp("tx imm sendpage fail 0x%x\n",
 					  tcp_ctask->datadigest);
 			}
 			return rc;
 		}
 		if (conn->datadgst_en)
-			crypto_digest_update(tcp_conn->data_tx_tfm,
-					     &tcp_ctask->sendbuf.sg, 1);
+			crypto_hash_update(&tcp_conn->data_tx_hash,
+					   &tcp_ctask->sendbuf.sg,
+					   tcp_ctask->sendbuf.sg.length);
 
 		if (!ctask->imm_count)
 			break;
@@ -1577,8 +1583,8 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 			tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
 			/* will continue with this ctask later.. */
 			if (conn->datadgst_en) {
-				crypto_digest_final(tcp_conn->data_tx_tfm,
-						    (u8 *)&dtask->digest);
+				crypto_hash_final(&tcp_conn->data_tx_hash,
+						  (u8 *)&dtask->digest);
 				debug_tcp("tx uns data fail 0x%x\n",
 					  dtask->digest);
 			}
@@ -1593,8 +1599,9 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 		 * so pass it
 		 */
 		if (conn->datadgst_en && tcp_ctask->sent - start > 0)
-			crypto_digest_update(tcp_conn->data_tx_tfm,
-					     &tcp_ctask->sendbuf.sg, 1);
+			crypto_hash_update(&tcp_conn->data_tx_hash,
+					   &tcp_ctask->sendbuf.sg,
+					   tcp_ctask->sendbuf.sg.length);
 
 		if (!ctask->data_count)
 			break;
@@ -1668,7 +1675,7 @@ solicit_again:
 		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
 		/* will continue with this ctask later.. */
 		if (conn->datadgst_en) {
-			crypto_digest_final(tcp_conn->data_tx_tfm,
-					    (u8 *)&dtask->digest);
+			crypto_hash_final(&tcp_conn->data_tx_hash,
+					  (u8 *)&dtask->digest);
 			debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
 		}
@@ -1677,8 +1684,8 @@ solicit_again:
 
 	BUG_ON(r2t->data_count < 0);
 	if (conn->datadgst_en)
-		crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg,
-				     1);
+		crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg,
+				   r2t->sendbuf.sg.length);
 
 	if (r2t->data_count) {
 		BUG_ON(ctask->sc->use_sg == 0);
@@ -1766,8 +1773,9 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	}
 
 	if (conn->datadgst_en) {
-		crypto_digest_update(tcp_conn->data_tx_tfm,
-				     &tcp_ctask->sendbuf.sg, 1);
+		crypto_hash_update(&tcp_conn->data_tx_hash,
+				   &tcp_ctask->sendbuf.sg,
+				   tcp_ctask->sendbuf.sg.length);
 		/* imm data? */
 		if (!dtask) {
 			rc = iscsi_digest_final_send(conn, ctask,
@@ -1963,13 +1971,13 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	/* now free tcp_conn */
 	if (digest) {
 		if (tcp_conn->tx_tfm)
-			crypto_free_tfm(tcp_conn->tx_tfm);
+			crypto_free_hash(tcp_conn->tx_tfm);
 		if (tcp_conn->rx_tfm)
-			crypto_free_tfm(tcp_conn->rx_tfm);
-		if (tcp_conn->data_tx_tfm)
-			crypto_free_tfm(tcp_conn->data_tx_tfm);
-		if (tcp_conn->data_rx_tfm)
-			crypto_free_tfm(tcp_conn->data_rx_tfm);
+			crypto_free_hash(tcp_conn->rx_tfm);
+		if (tcp_conn->data_tx_hash.tfm)
+			crypto_free_hash(tcp_conn->data_tx_hash.tfm);
+		if (tcp_conn->data_rx_hash.tfm)
+			crypto_free_hash(tcp_conn->data_rx_hash.tfm);
 	}
 
 	kfree(tcp_conn);
@@ -2130,44 +2138,48 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 		if (conn->hdrdgst_en) {
 			tcp_conn->hdr_size += sizeof(__u32);
 			if (!tcp_conn->tx_tfm)
-				tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c",
-								    0);
-			if (!tcp_conn->tx_tfm)
-				return -ENOMEM;
+				tcp_conn->tx_tfm =
+					crypto_alloc_hash("crc32c", 0,
+							  CRYPTO_ALG_ASYNC);
+			if (IS_ERR(tcp_conn->tx_tfm))
+				return PTR_ERR(tcp_conn->tx_tfm);
 			if (!tcp_conn->rx_tfm)
-				tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c",
-								    0);
-			if (!tcp_conn->rx_tfm) {
-				crypto_free_tfm(tcp_conn->tx_tfm);
-				return -ENOMEM;
+				tcp_conn->rx_tfm =
+					crypto_alloc_hash("crc32c", 0,
+							  CRYPTO_ALG_ASYNC);
+			if (IS_ERR(tcp_conn->rx_tfm)) {
+				crypto_free_hash(tcp_conn->tx_tfm);
+				return PTR_ERR(tcp_conn->rx_tfm);
 			}
 		} else {
 			if (tcp_conn->tx_tfm)
-				crypto_free_tfm(tcp_conn->tx_tfm);
+				crypto_free_hash(tcp_conn->tx_tfm);
 			if (tcp_conn->rx_tfm)
-				crypto_free_tfm(tcp_conn->rx_tfm);
+				crypto_free_hash(tcp_conn->rx_tfm);
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		iscsi_set_param(cls_conn, param, buf, buflen);
 		if (conn->datadgst_en) {
-			if (!tcp_conn->data_tx_tfm)
-				tcp_conn->data_tx_tfm =
-					crypto_alloc_tfm("crc32c", 0);
-			if (!tcp_conn->data_tx_tfm)
-				return -ENOMEM;
-			if (!tcp_conn->data_rx_tfm)
-				tcp_conn->data_rx_tfm =
-					crypto_alloc_tfm("crc32c", 0);
-			if (!tcp_conn->data_rx_tfm) {
-				crypto_free_tfm(tcp_conn->data_tx_tfm);
-				return -ENOMEM;
+			if (!tcp_conn->data_tx_hash.tfm)
+				tcp_conn->data_tx_hash.tfm =
+					crypto_alloc_hash("crc32c", 0,
+							  CRYPTO_ALG_ASYNC);
+			if (IS_ERR(tcp_conn->data_tx_hash.tfm))
+				return PTR_ERR(tcp_conn->data_tx_hash.tfm);
+			if (!tcp_conn->data_rx_hash.tfm)
+				tcp_conn->data_rx_hash.tfm =
+					crypto_alloc_hash("crc32c", 0,
+							  CRYPTO_ALG_ASYNC);
+			if (IS_ERR(tcp_conn->data_rx_hash.tfm)) {
+				crypto_free_hash(tcp_conn->data_tx_hash.tfm);
+				return PTR_ERR(tcp_conn->data_rx_hash.tfm);
 			}
 		} else {
-			if (tcp_conn->data_tx_tfm)
-				crypto_free_tfm(tcp_conn->data_tx_tfm);
-			if (tcp_conn->data_rx_tfm)
-				crypto_free_tfm(tcp_conn->data_rx_tfm);
+			if (tcp_conn->data_tx_hash.tfm)
+				crypto_free_hash(tcp_conn->data_tx_hash.tfm);
+			if (tcp_conn->data_rx_hash.tfm)
+				crypto_free_hash(tcp_conn->data_rx_hash.tfm);
 		}
 		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
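
A compact sketch of the incremental data-digest pattern iscsi_tcp follows after this conversion: the hash_desc is embedded in the connection, so crc32c state can be accumulated across many scatterlist fragments before being finalized. Names other than the crypto_hash_* calls are illustrative.

	static void crc32c_span_sketch(struct hash_desc *h,
				       struct scatterlist *sg,
				       unsigned int nbytes, u32 *crc_out)
	{
		crypto_hash_init(h);
		crypto_hash_update(h, sg, nbytes);	/* repeat per fragment */
		crypto_hash_final(h, (u8 *)crc_out);
	}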
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 6a4ee704e46e..e35701305fc9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -51,6 +51,7 @@
 #define ISCSI_SG_TABLESIZE	SG_ALL
 #define ISCSI_TCP_MAX_CMD_LEN	16
 
+struct crypto_hash;
 struct socket;
 
 /* Socket connection recieve helper */
@@ -84,8 +85,8 @@ struct iscsi_tcp_conn {
 	/* iSCSI connection-wide sequencing */
 	int			hdr_size;	/* PDU header size */
 
-	struct crypto_tfm	*rx_tfm;	/* CRC32C (Rx) */
-	struct crypto_tfm	*data_rx_tfm;	/* CRC32C (Rx) for data */
+	struct crypto_hash	*rx_tfm;	/* CRC32C (Rx) */
+	struct hash_desc	data_rx_hash;	/* CRC32C (Rx) for data */
 
 	/* control data */
 	struct iscsi_tcp_recv	in;		/* TCP receive context */
@@ -97,8 +98,8 @@ struct iscsi_tcp_conn {
 	void			(*old_write_space)(struct sock *);
 
 	/* xmit */
-	struct crypto_tfm	*tx_tfm;	/* CRC32C (Tx) */
-	struct crypto_tfm	*data_tx_tfm;	/* CRC32C (Tx) for data */
+	struct crypto_hash	*tx_tfm;	/* CRC32C (Tx) */
+	struct hash_desc	data_tx_hash;	/* CRC32C (Tx) for data */
 
 	/* MIB custom statistics */
 	uint32_t		sendpage_failures_cnt;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 06da7506363c..e35d7e52fdeb 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -33,7 +33,7 @@
 *
 */
 
-
+#include <linux/err.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfs4.h>
@@ -87,34 +87,35 @@ int
 nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
 {
 	struct xdr_netobj cksum;
-	struct crypto_tfm *tfm;
+	struct hash_desc desc;
 	struct scatterlist sg[1];
 	int status = nfserr_resource;
 
 	dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
 			clname->len, clname->data);
-	tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (tfm == NULL)
-		goto out;
-	cksum.len = crypto_tfm_alg_digestsize(tfm);
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(desc.tfm))
+		goto out_no_tfm;
+	cksum.len = crypto_hash_digestsize(desc.tfm);
 	cksum.data = kmalloc(cksum.len, GFP_KERNEL);
 	if (cksum.data == NULL)
 		goto out;
-	crypto_digest_init(tfm);
 
 	sg[0].page = virt_to_page(clname->data);
 	sg[0].offset = offset_in_page(clname->data);
 	sg[0].length = clname->len;
 
-	crypto_digest_update(tfm, sg, 1);
-	crypto_digest_final(tfm, cksum.data);
+	if (crypto_hash_digest(&desc, sg, sg->length, cksum.data))
+		goto out;
 
 	md5_to_hex(dname, cksum.data);
 
 	kfree(cksum.data);
 	status = nfs_ok;
 out:
-	crypto_free_tfm(tfm);
+	crypto_free_hash(desc.tfm);
+out_no_tfm:
 	return status;
 }
 
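
A sketch of the allocation lifecycle the new API enforces, as used above: crypto_alloc_hash() returns ERR_PTR() codes rather than NULL, so callers branch on IS_ERR()/PTR_ERR(), and one-shot digests wrap the buffer in a one-entry scatterlist. The wrapper below is illustrative, not part of the patch.

	static int md5_buf_sketch(const char *data, unsigned int len, u8 *out)
	{
		struct hash_desc desc;
		struct scatterlist sg[1];
		int err;

		desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(desc.tfm))
			return PTR_ERR(desc.tfm);
		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

		sg_init_one(sg, data, len);
		err = crypto_hash_digest(&desc, sg, len, out);
		crypto_free_hash(desc.tfm);
		return err;
	}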
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
new file mode 100644
index 000000000000..5748aecdb414
--- /dev/null
+++ b/include/crypto/algapi.h
@@ -0,0 +1,156 @@
1/*
2 * Cryptographic API for algorithms (i.e., low-level API).
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12#ifndef _CRYPTO_ALGAPI_H
13#define _CRYPTO_ALGAPI_H
14
15#include <linux/crypto.h>
16
17struct module;
18struct seq_file;
19
20struct crypto_type {
21 unsigned int (*ctxsize)(struct crypto_alg *alg);
22 int (*init)(struct crypto_tfm *tfm);
23 void (*exit)(struct crypto_tfm *tfm);
24 void (*show)(struct seq_file *m, struct crypto_alg *alg);
25};
26
27struct crypto_instance {
28 struct crypto_alg alg;
29
30 struct crypto_template *tmpl;
31 struct hlist_node list;
32
33 void *__ctx[] CRYPTO_MINALIGN_ATTR;
34};
35
36struct crypto_template {
37 struct list_head list;
38 struct hlist_head instances;
39 struct module *module;
40
41 struct crypto_instance *(*alloc)(void *param, unsigned int len);
42 void (*free)(struct crypto_instance *inst);
43
44 char name[CRYPTO_MAX_ALG_NAME];
45};
46
47struct crypto_spawn {
48 struct list_head list;
49 struct crypto_alg *alg;
50 struct crypto_instance *inst;
51};
52
53struct scatter_walk {
54 struct scatterlist *sg;
55 unsigned int offset;
56};
57
58struct blkcipher_walk {
59 union {
60 struct {
61 struct page *page;
62 unsigned long offset;
63 } phys;
64
65 struct {
66 u8 *page;
67 u8 *addr;
68 } virt;
69 } src, dst;
70
71 struct scatter_walk in;
72 unsigned int nbytes;
73
74 struct scatter_walk out;
75 unsigned int total;
76
77 void *page;
78 u8 *buffer;
79 u8 *iv;
80
81 int flags;
82};
83
84extern const struct crypto_type crypto_blkcipher_type;
85extern const struct crypto_type crypto_hash_type;
86
87void crypto_mod_put(struct crypto_alg *alg);
88
89int crypto_register_template(struct crypto_template *tmpl);
90void crypto_unregister_template(struct crypto_template *tmpl);
91struct crypto_template *crypto_lookup_template(const char *name);
92
93int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
94 struct crypto_instance *inst);
95void crypto_drop_spawn(struct crypto_spawn *spawn);
96struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn);
97
98struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
99 u32 type, u32 mask);
100struct crypto_instance *crypto_alloc_instance(const char *name,
101 struct crypto_alg *alg);
102
103int blkcipher_walk_done(struct blkcipher_desc *desc,
104 struct blkcipher_walk *walk, int err);
105int blkcipher_walk_virt(struct blkcipher_desc *desc,
106 struct blkcipher_walk *walk);
107int blkcipher_walk_phys(struct blkcipher_desc *desc,
108 struct blkcipher_walk *walk);
109
110static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
111{
112 unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
113 unsigned long align = crypto_tfm_alg_alignmask(tfm);
114
115 if (align <= crypto_tfm_ctx_alignment())
116 align = 1;
117 return (void *)ALIGN(addr, align);
118}
119
120static inline void *crypto_instance_ctx(struct crypto_instance *inst)
121{
122 return inst->__ctx;
123}
124
125static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
126{
127 return crypto_tfm_ctx(&tfm->base);
128}
129
130static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
131{
132 return crypto_tfm_ctx_aligned(&tfm->base);
133}
134
135static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
136{
137 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
138}
139
140static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
141{
142 return crypto_tfm_ctx_aligned(&tfm->base);
143}
144
145static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
146 struct scatterlist *dst,
147 struct scatterlist *src,
148 unsigned int nbytes)
149{
150 walk->in.sg = src;
151 walk->out.sg = dst;
152 walk->total = nbytes;
153}
154
155#endif /* _CRYPTO_ALGAPI_H */
156
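
A hedged sketch of how a mode implementation drives the blkcipher_walk API declared above: the walk maps each scatterlist segment into virtual addresses, the implementation transforms the chunk, and blkcipher_walk_done() is told how many bytes remain (0 when the chunk was fully consumed). The byte-wise XOR stands in for a real block function; everything but the walk calls is our illustration.

	static int ecb_like_crypt_sketch(struct blkcipher_desc *desc,
					 struct scatterlist *dst,
					 struct scatterlist *src,
					 unsigned int nbytes)
	{
		struct blkcipher_walk walk;
		int err;

		blkcipher_walk_init(&walk, dst, src, nbytes);
		err = blkcipher_walk_virt(desc, &walk);

		while ((nbytes = walk.nbytes)) {
			u8 *s = walk.src.virt.addr;
			u8 *d = walk.dst.virt.addr;
			unsigned int n;

			for (n = 0; n < nbytes; n++)	/* placeholder transform */
				d[n] = s[n] ^ 0xff;

			/* 0 = everything in this chunk was processed */
			err = blkcipher_walk_done(desc, &walk, 0);
		}
		return err;
	}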
diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h
new file mode 100644
index 000000000000..c408522595c6
--- /dev/null
+++ b/include/crypto/twofish.h
@@ -0,0 +1,22 @@
1#ifndef _CRYPTO_TWOFISH_H
2#define _CRYPTO_TWOFISH_H
3
4#include <linux/types.h>
5
6#define TF_MIN_KEY_SIZE 16
7#define TF_MAX_KEY_SIZE 32
8#define TF_BLOCK_SIZE 16
9
10struct crypto_tfm;
11
12/* Structure for an expanded Twofish key. s contains the key-dependent
13 * S-boxes composed with the MDS matrix; w contains the eight "whitening"
14 * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note
15 * that k[i] corresponds to what the Twofish paper calls K[i+8]. */
16struct twofish_ctx {
17 u32 s[4][256], w[8], k[32];
18};
19
20int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
21
22#endif
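
A hedged sketch of how an implementation plugs into this shared header: twofish_common.c supplies twofish_setkey(), so an arch-specific module only provides its block functions and reuses the common key schedule. The alg skeleton below is illustrative; tf_encrypt/tf_decrypt are hypothetical names.

	static void tf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	static void tf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);

	static struct crypto_alg tf_alg_sketch = {
		.cra_name		= "twofish",
		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
		.cra_blocksize		= TF_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct twofish_ctx),
		.cra_module		= THIS_MODULE,
		.cra_list		= LIST_HEAD_INIT(tf_alg_sketch.cra_list),
		.cra_u			= { .cipher = {
			.cia_min_keysize	= TF_MIN_KEY_SIZE,
			.cia_max_keysize	= TF_MAX_KEY_SIZE,
			.cia_setkey		= twofish_setkey,	/* shared */
			.cia_encrypt		= tf_encrypt,
			.cia_decrypt		= tf_decrypt } }
	};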
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 7f946241b879..8f2ffa4caabf 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -17,20 +17,36 @@
17#ifndef _LINUX_CRYPTO_H 17#ifndef _LINUX_CRYPTO_H
18#define _LINUX_CRYPTO_H 18#define _LINUX_CRYPTO_H
19 19
20#include <asm/atomic.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/slab.h>
24#include <linux/string.h> 25#include <linux/string.h>
25#include <asm/page.h> 26#include <linux/uaccess.h>
26 27
27/* 28/*
28 * Algorithm masks and types. 29 * Algorithm masks and types.
29 */ 30 */
30#define CRYPTO_ALG_TYPE_MASK 0x000000ff 31#define CRYPTO_ALG_TYPE_MASK 0x0000000f
31#define CRYPTO_ALG_TYPE_CIPHER 0x00000001 32#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
32#define CRYPTO_ALG_TYPE_DIGEST 0x00000002 33#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
33#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004 34#define CRYPTO_ALG_TYPE_HASH 0x00000003
35#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
36#define CRYPTO_ALG_TYPE_COMPRESS 0x00000005
37
38#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
39
40#define CRYPTO_ALG_LARVAL 0x00000010
41#define CRYPTO_ALG_DEAD 0x00000020
42#define CRYPTO_ALG_DYING 0x00000040
43#define CRYPTO_ALG_ASYNC 0x00000080
44
45/*
46 * Set this bit if and only if the algorithm requires another algorithm of
47 * the same type to handle corner cases.
48 */
49#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
34 50
35/* 51/*
36 * Transform masks and values (for crt_flags). 52 * Transform masks and values (for crt_flags).
@@ -61,8 +77,37 @@
61#define CRYPTO_DIR_ENCRYPT 1 77#define CRYPTO_DIR_ENCRYPT 1
62#define CRYPTO_DIR_DECRYPT 0 78#define CRYPTO_DIR_DECRYPT 0
63 79
80/*
81 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
82 * declaration) is used to ensure that the crypto_tfm context structure is
83 * aligned correctly for the given architecture so that there are no alignment
84 * faults for C data types. In particular, this is required on platforms such
85 * as arm where pointers are 32-bit aligned but there are data types such as
86 * u64 which require 64-bit alignment.
87 */
88#if defined(ARCH_KMALLOC_MINALIGN)
89#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
90#elif defined(ARCH_SLAB_MINALIGN)
91#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
92#endif
93
94#ifdef CRYPTO_MINALIGN
95#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
96#else
97#define CRYPTO_MINALIGN_ATTR
98#endif
99
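/*
 * Illustration (not part of the patch): with the attribute applied, a
 * hypothetical context like the one below is safe even on arm, where
 * u64 wants 64-bit alignment but pointers only guarantee 32-bit.
 */
struct myctx {
	u32 flags;
	u64 counter;		/* relies on CRYPTO_MINALIGN_ATTR */
} CRYPTO_MINALIGN_ATTR;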
64struct scatterlist; 100struct scatterlist;
101struct crypto_blkcipher;
102struct crypto_hash;
65struct crypto_tfm; 103struct crypto_tfm;
104struct crypto_type;
105
106struct blkcipher_desc {
107 struct crypto_blkcipher *tfm;
108 void *info;
109 u32 flags;
110};
66 111
67struct cipher_desc { 112struct cipher_desc {
68 struct crypto_tfm *tfm; 113 struct crypto_tfm *tfm;
@@ -72,30 +117,50 @@ struct cipher_desc {
72 void *info; 117 void *info;
73}; 118};
74 119
120struct hash_desc {
121 struct crypto_hash *tfm;
122 u32 flags;
123};
124
75/* 125/*
76 * Algorithms: modular crypto algorithm implementations, managed 126 * Algorithms: modular crypto algorithm implementations, managed
77 * via crypto_register_alg() and crypto_unregister_alg(). 127 * via crypto_register_alg() and crypto_unregister_alg().
78 */ 128 */
129struct blkcipher_alg {
130 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
131 unsigned int keylen);
132 int (*encrypt)(struct blkcipher_desc *desc,
133 struct scatterlist *dst, struct scatterlist *src,
134 unsigned int nbytes);
135 int (*decrypt)(struct blkcipher_desc *desc,
136 struct scatterlist *dst, struct scatterlist *src,
137 unsigned int nbytes);
138
139 unsigned int min_keysize;
140 unsigned int max_keysize;
141 unsigned int ivsize;
142};
143
79struct cipher_alg { 144struct cipher_alg {
80 unsigned int cia_min_keysize; 145 unsigned int cia_min_keysize;
81 unsigned int cia_max_keysize; 146 unsigned int cia_max_keysize;
82 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, 147 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
83 unsigned int keylen, u32 *flags); 148 unsigned int keylen);
84 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 149 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 150 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
86 151
87 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, 152 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
88 u8 *dst, const u8 *src, 153 u8 *dst, const u8 *src,
89 unsigned int nbytes); 154 unsigned int nbytes) __deprecated;
90 unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc, 155 unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
91 u8 *dst, const u8 *src, 156 u8 *dst, const u8 *src,
92 unsigned int nbytes); 157 unsigned int nbytes) __deprecated;
93 unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc, 158 unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
94 u8 *dst, const u8 *src, 159 u8 *dst, const u8 *src,
95 unsigned int nbytes); 160 unsigned int nbytes) __deprecated;
96 unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc, 161 unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
97 u8 *dst, const u8 *src, 162 u8 *dst, const u8 *src,
98 unsigned int nbytes); 163 unsigned int nbytes) __deprecated;
99}; 164};
100 165
101struct digest_alg { 166struct digest_alg {
@@ -105,7 +170,20 @@ struct digest_alg {
105 unsigned int len); 170 unsigned int len);
106 void (*dia_final)(struct crypto_tfm *tfm, u8 *out); 171 void (*dia_final)(struct crypto_tfm *tfm, u8 *out);
107 int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key, 172 int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key,
108 unsigned int keylen, u32 *flags); 173 unsigned int keylen);
174};
175
176struct hash_alg {
177 int (*init)(struct hash_desc *desc);
178 int (*update)(struct hash_desc *desc, struct scatterlist *sg,
179 unsigned int nbytes);
180 int (*final)(struct hash_desc *desc, u8 *out);
181 int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
182 unsigned int nbytes, u8 *out);
183 int (*setkey)(struct crypto_hash *tfm, const u8 *key,
184 unsigned int keylen);
185
186 unsigned int digestsize;
109}; 187};
110 188
111struct compress_alg { 189struct compress_alg {
@@ -115,30 +193,40 @@ struct compress_alg {
115 unsigned int slen, u8 *dst, unsigned int *dlen); 193 unsigned int slen, u8 *dst, unsigned int *dlen);
116}; 194};
117 195
196#define cra_blkcipher cra_u.blkcipher
118#define cra_cipher cra_u.cipher 197#define cra_cipher cra_u.cipher
119#define cra_digest cra_u.digest 198#define cra_digest cra_u.digest
199#define cra_hash cra_u.hash
120#define cra_compress cra_u.compress 200#define cra_compress cra_u.compress
121 201
122struct crypto_alg { 202struct crypto_alg {
123 struct list_head cra_list; 203 struct list_head cra_list;
204 struct list_head cra_users;
205
124 u32 cra_flags; 206 u32 cra_flags;
125 unsigned int cra_blocksize; 207 unsigned int cra_blocksize;
126 unsigned int cra_ctxsize; 208 unsigned int cra_ctxsize;
127 unsigned int cra_alignmask; 209 unsigned int cra_alignmask;
128 210
129 int cra_priority; 211 int cra_priority;
212 atomic_t cra_refcnt;
130 213
131 char cra_name[CRYPTO_MAX_ALG_NAME]; 214 char cra_name[CRYPTO_MAX_ALG_NAME];
132 char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 215 char cra_driver_name[CRYPTO_MAX_ALG_NAME];
133 216
217 const struct crypto_type *cra_type;
218
134 union { 219 union {
220 struct blkcipher_alg blkcipher;
135 struct cipher_alg cipher; 221 struct cipher_alg cipher;
136 struct digest_alg digest; 222 struct digest_alg digest;
223 struct hash_alg hash;
137 struct compress_alg compress; 224 struct compress_alg compress;
138 } cra_u; 225 } cra_u;
139 226
140 int (*cra_init)(struct crypto_tfm *tfm); 227 int (*cra_init)(struct crypto_tfm *tfm);
141 void (*cra_exit)(struct crypto_tfm *tfm); 228 void (*cra_exit)(struct crypto_tfm *tfm);
229 void (*cra_destroy)(struct crypto_alg *alg);
142 230
143 struct module *cra_module; 231 struct module *cra_module;
144}; 232};
@@ -153,20 +241,39 @@ int crypto_unregister_alg(struct crypto_alg *alg);
153 * Algorithm query interface. 241 * Algorithm query interface.
154 */ 242 */
155#ifdef CONFIG_CRYPTO 243#ifdef CONFIG_CRYPTO
156int crypto_alg_available(const char *name, u32 flags); 244int crypto_alg_available(const char *name, u32 flags)
245 __deprecated_for_modules;
246int crypto_has_alg(const char *name, u32 type, u32 mask);
157#else 247#else
 248static int crypto_alg_available(const char *name, u32 flags)
 249 __deprecated_for_modules;
158static inline int crypto_alg_available(const char *name, u32 flags) 250static inline int crypto_alg_available(const char *name, u32 flags)
159{ 251{
160 return 0; 252 return 0;
161} 253}
254
255static inline int crypto_has_alg(const char *name, u32 type, u32 mask)
256{
257 return 0;
258}
162#endif 259#endif
163 260
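/*
 * Usage sketch (not part of the patch): crypto_has_alg() is the typed
 * replacement for the now-deprecated crypto_alg_available(); callers
 * probe with an explicit type/mask pair, e.g. for a hash:
 */
if (!crypto_has_alg("hmac(sha1)", CRYPTO_ALG_TYPE_HASH,
		    CRYPTO_ALG_TYPE_HASH_MASK))
	return -ENOSYS;	/* hypothetical caller bails out */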
164/* 261/*
165 * Transforms: user-instantiated objects which encapsulate algorithms 262 * Transforms: user-instantiated objects which encapsulate algorithms
166 * and core processing logic. Managed via crypto_alloc_tfm() and 263 * and core processing logic. Managed via crypto_alloc_*() and
167 * crypto_free_tfm(), as well as the various helpers below. 264 * crypto_free_*(), as well as the various helpers below.
168 */ 265 */
169 266
267struct blkcipher_tfm {
268 void *iv;
269 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
270 unsigned int keylen);
271 int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
272 struct scatterlist *src, unsigned int nbytes);
273 int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
274 struct scatterlist *src, unsigned int nbytes);
275};
276
170struct cipher_tfm { 277struct cipher_tfm {
171 void *cit_iv; 278 void *cit_iv;
172 unsigned int cit_ivsize; 279 unsigned int cit_ivsize;
@@ -190,20 +297,20 @@ struct cipher_tfm {
190 struct scatterlist *src, 297 struct scatterlist *src,
191 unsigned int nbytes, u8 *iv); 298 unsigned int nbytes, u8 *iv);
192 void (*cit_xor_block)(u8 *dst, const u8 *src); 299 void (*cit_xor_block)(u8 *dst, const u8 *src);
300 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
301 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
193}; 302};
194 303
195struct digest_tfm { 304struct hash_tfm {
196 void (*dit_init)(struct crypto_tfm *tfm); 305 int (*init)(struct hash_desc *desc);
197 void (*dit_update)(struct crypto_tfm *tfm, 306 int (*update)(struct hash_desc *desc,
198 struct scatterlist *sg, unsigned int nsg); 307 struct scatterlist *sg, unsigned int nsg);
199 void (*dit_final)(struct crypto_tfm *tfm, u8 *out); 308 int (*final)(struct hash_desc *desc, u8 *out);
200 void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg, 309 int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
201 unsigned int nsg, u8 *out); 310 unsigned int nsg, u8 *out);
202 int (*dit_setkey)(struct crypto_tfm *tfm, 311 int (*setkey)(struct crypto_hash *tfm, const u8 *key,
203 const u8 *key, unsigned int keylen); 312 unsigned int keylen);
204#ifdef CONFIG_CRYPTO_HMAC 313 unsigned int digestsize;
205 void *dit_hmac_block;
206#endif
207}; 314};
208 315
209struct compress_tfm { 316struct compress_tfm {
@@ -215,8 +322,9 @@ struct compress_tfm {
215 u8 *dst, unsigned int *dlen); 322 u8 *dst, unsigned int *dlen);
216}; 323};
217 324
325#define crt_blkcipher crt_u.blkcipher
218#define crt_cipher crt_u.cipher 326#define crt_cipher crt_u.cipher
219#define crt_digest crt_u.digest 327#define crt_hash crt_u.hash
220#define crt_compress crt_u.compress 328#define crt_compress crt_u.compress
221 329
222struct crypto_tfm { 330struct crypto_tfm {
@@ -224,30 +332,43 @@ struct crypto_tfm {
224 u32 crt_flags; 332 u32 crt_flags;
225 333
226 union { 334 union {
335 struct blkcipher_tfm blkcipher;
227 struct cipher_tfm cipher; 336 struct cipher_tfm cipher;
228 struct digest_tfm digest; 337 struct hash_tfm hash;
229 struct compress_tfm compress; 338 struct compress_tfm compress;
230 } crt_u; 339 } crt_u;
231 340
232 struct crypto_alg *__crt_alg; 341 struct crypto_alg *__crt_alg;
233 342
234 char __crt_ctx[] __attribute__ ((__aligned__)); 343 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
344};
345
346#define crypto_cipher crypto_tfm
347#define crypto_comp crypto_tfm
348
349struct crypto_blkcipher {
350 struct crypto_tfm base;
351};
352
353struct crypto_hash {
354 struct crypto_tfm base;
355};
356
357enum {
358 CRYPTOA_UNSPEC,
359 CRYPTOA_ALG,
360};
361
362struct crypto_attr_alg {
363 char name[CRYPTO_MAX_ALG_NAME];
235}; 364};
236 365
237/* 366/*
238 * Transform user interface. 367 * Transform user interface.
239 */ 368 */
240 369
241/*
242 * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm.
243 * If that fails and the kernel supports dynamically loadable modules, it
244 * will then attempt to load a module of the same name or alias. A refcount
245 * is grabbed on the algorithm which is then associated with the new transform.
246 *
247 * crypto_free_tfm() frees up the transform and any associated resources,
248 * then drops the refcount on the associated algorithm.
249 */
250struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); 370struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
371struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
251void crypto_free_tfm(struct crypto_tfm *tfm); 372void crypto_free_tfm(struct crypto_tfm *tfm);
252 373
253/* 374/*
@@ -258,6 +379,16 @@ static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
258 return tfm->__crt_alg->cra_name; 379 return tfm->__crt_alg->cra_name;
259} 380}
260 381
382static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
383{
384 return tfm->__crt_alg->cra_driver_name;
385}
386
387static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
388{
389 return tfm->__crt_alg->cra_priority;
390}
391
261static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm) 392static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
262{ 393{
263 return module_name(tfm->__crt_alg->cra_module); 394 return module_name(tfm->__crt_alg->cra_module);
@@ -268,18 +399,23 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
268 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; 399 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
269} 400}
270 401
402static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
403 __deprecated;
271static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm) 404static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
272{ 405{
273 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 406 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
274 return tfm->__crt_alg->cra_cipher.cia_min_keysize; 407 return tfm->__crt_alg->cra_cipher.cia_min_keysize;
275} 408}
276 409
410static unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
411 __deprecated;
277static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm) 412static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
278{ 413{
279 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 414 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
280 return tfm->__crt_alg->cra_cipher.cia_max_keysize; 415 return tfm->__crt_alg->cra_cipher.cia_max_keysize;
281} 416}
282 417
418static unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) __deprecated;
283static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) 419static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
284{ 420{
285 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 421 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
@@ -302,6 +438,21 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
302 return tfm->__crt_alg->cra_alignmask; 438 return tfm->__crt_alg->cra_alignmask;
303} 439}
304 440
441static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
442{
443 return tfm->crt_flags;
444}
445
446static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
447{
448 tfm->crt_flags |= flags;
449}
450
451static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
452{
453 tfm->crt_flags &= ~flags;
454}
455
305static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) 456static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
306{ 457{
307 return tfm->__crt_ctx; 458 return tfm->__crt_ctx;
@@ -316,50 +467,374 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
316/* 467/*
317 * API wrappers. 468 * API wrappers.
318 */ 469 */
319static inline void crypto_digest_init(struct crypto_tfm *tfm) 470static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
471 struct crypto_tfm *tfm)
320{ 472{
321 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 473 return (struct crypto_blkcipher *)tfm;
322 tfm->crt_digest.dit_init(tfm);
323} 474}
324 475
325static inline void crypto_digest_update(struct crypto_tfm *tfm, 476static inline struct crypto_blkcipher *crypto_blkcipher_cast(
326 struct scatterlist *sg, 477 struct crypto_tfm *tfm)
327 unsigned int nsg)
328{ 478{
329 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 479 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
330 tfm->crt_digest.dit_update(tfm, sg, nsg); 480 return __crypto_blkcipher_cast(tfm);
331} 481}
332 482
333static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) 483static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
484 const char *alg_name, u32 type, u32 mask)
334{ 485{
335 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 486 type &= ~CRYPTO_ALG_TYPE_MASK;
336 tfm->crt_digest.dit_final(tfm, out); 487 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
488 mask |= CRYPTO_ALG_TYPE_MASK;
489
490 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
337} 491}
338 492
339static inline void crypto_digest_digest(struct crypto_tfm *tfm, 493static inline struct crypto_tfm *crypto_blkcipher_tfm(
340 struct scatterlist *sg, 494 struct crypto_blkcipher *tfm)
341 unsigned int nsg, u8 *out)
342{ 495{
343 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 496 return &tfm->base;
344 tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
345} 497}
346 498
347static inline int crypto_digest_setkey(struct crypto_tfm *tfm, 499static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
500{
501 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
502}
503
504static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
505{
506 type &= ~CRYPTO_ALG_TYPE_MASK;
507 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
508 mask |= CRYPTO_ALG_TYPE_MASK;
509
510 return crypto_has_alg(alg_name, type, mask);
511}
512
513static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
514{
515 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
516}
517
518static inline struct blkcipher_tfm *crypto_blkcipher_crt(
519 struct crypto_blkcipher *tfm)
520{
521 return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
522}
523
524static inline struct blkcipher_alg *crypto_blkcipher_alg(
525 struct crypto_blkcipher *tfm)
526{
527 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
528}
529
530static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
531{
532 return crypto_blkcipher_alg(tfm)->ivsize;
533}
534
535static inline unsigned int crypto_blkcipher_blocksize(
536 struct crypto_blkcipher *tfm)
537{
538 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
539}
540
541static inline unsigned int crypto_blkcipher_alignmask(
542 struct crypto_blkcipher *tfm)
543{
544 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
545}
546
547static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
548{
549 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
550}
551
552static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
553 u32 flags)
554{
555 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
556}
557
558static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
559 u32 flags)
560{
561 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
562}
563
564static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
565 const u8 *key, unsigned int keylen)
566{
567 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
568 key, keylen);
569}
570
571static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
572 struct scatterlist *dst,
573 struct scatterlist *src,
574 unsigned int nbytes)
575{
576 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
577 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
578}
579
580static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
581 struct scatterlist *dst,
582 struct scatterlist *src,
583 unsigned int nbytes)
584{
585 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
586}
587
588static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
589 struct scatterlist *dst,
590 struct scatterlist *src,
591 unsigned int nbytes)
592{
593 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
594 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
595}
596
597static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
598 struct scatterlist *dst,
599 struct scatterlist *src,
600 unsigned int nbytes)
601{
602 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
603}
604
605static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
606 const u8 *src, unsigned int len)
607{
608 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
609}
610
611static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
612 u8 *dst, unsigned int len)
613{
614 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
615}
616
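/*
 * Usage sketch (not part of the patch): encrypting one buffer with the
 * new blkcipher interface; key, iv, buf and buflen are hypothetical,
 * buflen is assumed to be a multiple of the block size, and error
 * handling is abridged.
 */
struct crypto_blkcipher *tfm;
struct blkcipher_desc desc;
struct scatterlist sg;
int err;

tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

crypto_blkcipher_setkey(tfm, key, 16);		/* AES-128 */
crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

desc.tfm = tfm;
desc.flags = 0;
sg_init_one(&sg, buf, buflen);
err = crypto_blkcipher_encrypt(&desc, &sg, &sg, buflen);

crypto_free_blkcipher(tfm);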
617static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
618{
619 return (struct crypto_cipher *)tfm;
620}
621
622static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
623{
624 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
625 return __crypto_cipher_cast(tfm);
626}
627
628static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
629 u32 type, u32 mask)
630{
631 type &= ~CRYPTO_ALG_TYPE_MASK;
632 type |= CRYPTO_ALG_TYPE_CIPHER;
633 mask |= CRYPTO_ALG_TYPE_MASK;
634
635 return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
636}
637
638static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
639{
640 return tfm;
641}
642
643static inline void crypto_free_cipher(struct crypto_cipher *tfm)
644{
645 crypto_free_tfm(crypto_cipher_tfm(tfm));
646}
647
648static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
649{
650 type &= ~CRYPTO_ALG_TYPE_MASK;
651 type |= CRYPTO_ALG_TYPE_CIPHER;
652 mask |= CRYPTO_ALG_TYPE_MASK;
653
654 return crypto_has_alg(alg_name, type, mask);
655}
656
657static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
658{
659 return &crypto_cipher_tfm(tfm)->crt_cipher;
660}
661
662static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
663{
664 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
665}
666
667static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
668{
669 return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
670}
671
672static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
673{
674 return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
675}
676
677static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
678 u32 flags)
679{
680 crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
681}
682
683static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
684 u32 flags)
685{
686 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
687}
688
689static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
348 const u8 *key, unsigned int keylen) 690 const u8 *key, unsigned int keylen)
349{ 691{
350 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 692 return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
351 if (tfm->crt_digest.dit_setkey == NULL) 693 key, keylen);
352 return -ENOSYS; 694}
353 return tfm->crt_digest.dit_setkey(tfm, key, keylen); 695
696static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
697 u8 *dst, const u8 *src)
698{
699 crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
700 dst, src);
701}
702
703static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
704 u8 *dst, const u8 *src)
705{
706 crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
707 dst, src);
708}
709
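/*
 * Usage sketch (not part of the patch): the _one helpers replace the
 * deprecated scatterlist-based crypto_cipher_{en,de}crypt() for
 * single-block work, as in the ieee80211 CCMP conversion further
 * down; key and block are hypothetical.
 */
struct crypto_cipher *tfm;

tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

crypto_cipher_setkey(tfm, key, 16);
crypto_cipher_encrypt_one(tfm, block, block);	/* one block, in place */
crypto_free_cipher(tfm);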
710void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules;
711void crypto_digest_update(struct crypto_tfm *tfm,
712 struct scatterlist *sg, unsigned int nsg)
713 __deprecated_for_modules;
714void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
715 __deprecated_for_modules;
716void crypto_digest_digest(struct crypto_tfm *tfm,
717 struct scatterlist *sg, unsigned int nsg, u8 *out)
718 __deprecated_for_modules;
719
720static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
721{
722 return (struct crypto_hash *)tfm;
723}
724
725static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
726{
727 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
728 CRYPTO_ALG_TYPE_HASH_MASK);
729 return __crypto_hash_cast(tfm);
354} 730}
355 731
356static inline int crypto_cipher_setkey(struct crypto_tfm *tfm, 732static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key,
733 unsigned int keylen) __deprecated;
734static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
357 const u8 *key, unsigned int keylen) 735 const u8 *key, unsigned int keylen)
358{ 736{
359 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 737 return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen);
360 return tfm->crt_cipher.cit_setkey(tfm, key, keylen); 738}
739
740static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
741 u32 type, u32 mask)
742{
743 type &= ~CRYPTO_ALG_TYPE_MASK;
744 type |= CRYPTO_ALG_TYPE_HASH;
745 mask |= CRYPTO_ALG_TYPE_HASH_MASK;
746
747 return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
748}
749
750static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
751{
752 return &tfm->base;
753}
754
755static inline void crypto_free_hash(struct crypto_hash *tfm)
756{
757 crypto_free_tfm(crypto_hash_tfm(tfm));
758}
759
760static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
761{
762 type &= ~CRYPTO_ALG_TYPE_MASK;
763 type |= CRYPTO_ALG_TYPE_HASH;
764 mask |= CRYPTO_ALG_TYPE_HASH_MASK;
765
766 return crypto_has_alg(alg_name, type, mask);
767}
768
769static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
770{
771 return &crypto_hash_tfm(tfm)->crt_hash;
772}
773
774static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
775{
776 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
777}
778
779static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
780{
781 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
782}
783
784static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
785{
786 return crypto_hash_crt(tfm)->digestsize;
787}
788
789static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
790{
791 return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
792}
793
794static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
795{
796 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
361} 797}
362 798
799static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
800{
801 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
802}
803
804static inline int crypto_hash_init(struct hash_desc *desc)
805{
806 return crypto_hash_crt(desc->tfm)->init(desc);
807}
808
809static inline int crypto_hash_update(struct hash_desc *desc,
810 struct scatterlist *sg,
811 unsigned int nbytes)
812{
813 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
814}
815
816static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
817{
818 return crypto_hash_crt(desc->tfm)->final(desc, out);
819}
820
821static inline int crypto_hash_digest(struct hash_desc *desc,
822 struct scatterlist *sg,
823 unsigned int nbytes, u8 *out)
824{
825 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
826}
827
828static inline int crypto_hash_setkey(struct crypto_hash *hash,
829 const u8 *key, unsigned int keylen)
830{
831 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
832}
833
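/*
 * Usage sketch (not part of the patch): computing an HMAC over a flat
 * buffer with the new hash interface; key, keylen, data and datalen
 * are hypothetical and error handling is abridged.
 */
struct crypto_hash *tfm;
struct hash_desc desc;
struct scatterlist sg;
u8 out[20];				/* SHA-1 digest size */
int err;

tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

crypto_hash_setkey(tfm, key, keylen);
sg_init_one(&sg, data, datalen);

desc.tfm = tfm;
desc.flags = 0;
err = crypto_hash_digest(&desc, &sg, datalen, out);

crypto_free_hash(tfm);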
834static int crypto_cipher_encrypt(struct crypto_tfm *tfm,
835 struct scatterlist *dst,
836 struct scatterlist *src,
837 unsigned int nbytes) __deprecated;
363static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm, 838static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
364 struct scatterlist *dst, 839 struct scatterlist *dst,
365 struct scatterlist *src, 840 struct scatterlist *src,
@@ -369,16 +844,23 @@ static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
369 return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes); 844 return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
370} 845}
371 846
847static int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
848 struct scatterlist *dst,
849 struct scatterlist *src,
850 unsigned int nbytes, u8 *iv) __deprecated;
372static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm, 851static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
373 struct scatterlist *dst, 852 struct scatterlist *dst,
374 struct scatterlist *src, 853 struct scatterlist *src,
375 unsigned int nbytes, u8 *iv) 854 unsigned int nbytes, u8 *iv)
376{ 855{
377 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 856 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
378 BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
379 return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv); 857 return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
380} 858}
381 859
860static int crypto_cipher_decrypt(struct crypto_tfm *tfm,
861 struct scatterlist *dst,
862 struct scatterlist *src,
863 unsigned int nbytes) __deprecated;
382static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm, 864static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
383 struct scatterlist *dst, 865 struct scatterlist *dst,
384 struct scatterlist *src, 866 struct scatterlist *src,
@@ -388,16 +870,21 @@ static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
388 return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes); 870 return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
389} 871}
390 872
873static int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
874 struct scatterlist *dst,
875 struct scatterlist *src,
876 unsigned int nbytes, u8 *iv) __deprecated;
391static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm, 877static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
392 struct scatterlist *dst, 878 struct scatterlist *dst,
393 struct scatterlist *src, 879 struct scatterlist *src,
394 unsigned int nbytes, u8 *iv) 880 unsigned int nbytes, u8 *iv)
395{ 881{
396 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 882 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
397 BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
398 return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv); 883 return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
399} 884}
400 885
886static void crypto_cipher_set_iv(struct crypto_tfm *tfm,
887 const u8 *src, unsigned int len) __deprecated;
401static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm, 888static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
402 const u8 *src, unsigned int len) 889 const u8 *src, unsigned int len)
403{ 890{
@@ -405,6 +892,8 @@ static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
405 memcpy(tfm->crt_cipher.cit_iv, src, len); 892 memcpy(tfm->crt_cipher.cit_iv, src, len);
406} 893}
407 894
895static void crypto_cipher_get_iv(struct crypto_tfm *tfm,
896 u8 *dst, unsigned int len) __deprecated;
408static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm, 897static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
409 u8 *dst, unsigned int len) 898 u8 *dst, unsigned int len)
410{ 899{
@@ -412,34 +901,70 @@ static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
412 memcpy(dst, tfm->crt_cipher.cit_iv, len); 901 memcpy(dst, tfm->crt_cipher.cit_iv, len);
413} 902}
414 903
415static inline int crypto_comp_compress(struct crypto_tfm *tfm, 904static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
905{
906 return (struct crypto_comp *)tfm;
907}
908
909static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
910{
911 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
912 CRYPTO_ALG_TYPE_MASK);
913 return __crypto_comp_cast(tfm);
914}
915
916static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
917 u32 type, u32 mask)
918{
919 type &= ~CRYPTO_ALG_TYPE_MASK;
920 type |= CRYPTO_ALG_TYPE_COMPRESS;
921 mask |= CRYPTO_ALG_TYPE_MASK;
922
923 return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
924}
925
926static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
927{
928 return tfm;
929}
930
931static inline void crypto_free_comp(struct crypto_comp *tfm)
932{
933 crypto_free_tfm(crypto_comp_tfm(tfm));
934}
935
936static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
937{
938 type &= ~CRYPTO_ALG_TYPE_MASK;
939 type |= CRYPTO_ALG_TYPE_COMPRESS;
940 mask |= CRYPTO_ALG_TYPE_MASK;
941
942 return crypto_has_alg(alg_name, type, mask);
943}
944
945static inline const char *crypto_comp_name(struct crypto_comp *tfm)
946{
947 return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
948}
949
950static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
951{
952 return &crypto_comp_tfm(tfm)->crt_compress;
953}
954
955static inline int crypto_comp_compress(struct crypto_comp *tfm,
416 const u8 *src, unsigned int slen, 956 const u8 *src, unsigned int slen,
417 u8 *dst, unsigned int *dlen) 957 u8 *dst, unsigned int *dlen)
418{ 958{
419 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS); 959 return crypto_comp_crt(tfm)->cot_compress(tfm, src, slen, dst, dlen);
420 return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
421} 960}
422 961
423static inline int crypto_comp_decompress(struct crypto_tfm *tfm, 962static inline int crypto_comp_decompress(struct crypto_comp *tfm,
424 const u8 *src, unsigned int slen, 963 const u8 *src, unsigned int slen,
425 u8 *dst, unsigned int *dlen) 964 u8 *dst, unsigned int *dlen)
426{ 965{
427 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS); 966 return crypto_comp_crt(tfm)->cot_decompress(tfm, src, slen, dst, dlen);
428 return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
429} 967}
430 968
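/*
 * Usage sketch (not part of the patch): the crypto_comp wrappers above
 * take over from the old crypto_tfm-based calls; src, slen, dst and
 * dst_size are hypothetical, and dlen carries the output size in and
 * out.
 */
struct crypto_comp *tfm;
unsigned int dlen = dst_size;
int err;

tfm = crypto_alloc_comp("deflate", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
/* on success dlen now holds the compressed length */
crypto_free_comp(tfm);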
431/*
432 * HMAC support.
433 */
434#ifdef CONFIG_CRYPTO_HMAC
435void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
436void crypto_hmac_update(struct crypto_tfm *tfm,
437 struct scatterlist *sg, unsigned int nsg);
438void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
439 unsigned int *keylen, u8 *out);
440void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
441 struct scatterlist *sg, unsigned int nsg, u8 *out);
442#endif /* CONFIG_CRYPTO_HMAC */
443
444#endif /* _LINUX_CRYPTO_H */ 969#endif /* _LINUX_CRYPTO_H */
445 970
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 66ff545552f7..4efbd9c445f5 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -5,7 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/string.h> 6#include <linux/string.h>
7 7
8static inline void sg_set_buf(struct scatterlist *sg, void *buf, 8static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
9 unsigned int buflen) 9 unsigned int buflen)
10{ 10{
11 sg->page = virt_to_page(buf); 11 sg->page = virt_to_page(buf);
@@ -13,7 +13,7 @@ static inline void sg_set_buf(struct scatterlist *sg, void *buf,
13 sg->length = buflen; 13 sg->length = buflen;
14} 14}
15 15
16static inline void sg_init_one(struct scatterlist *sg, void *buf, 16static inline void sg_init_one(struct scatterlist *sg, const void *buf,
17 unsigned int buflen) 17 unsigned int buflen)
18{ 18{
19 memset(sg, 0, sizeof(*sg)); 19 memset(sg, 0, sizeof(*sg));
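/*
 * Usage sketch (not part of the patch): the const qualifier added
 * above lets callers map read-only data without casting it away, e.g.:
 */
static const char cookie[] = "fixed input";
struct scatterlist sg;

sg_init_one(&sg, cookie, sizeof(cookie) - 1);	/* no cast needed now */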
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 1279280d7196..e30ba201910a 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -46,8 +46,8 @@ struct krb5_ctx {
46 unsigned char seed[16]; 46 unsigned char seed[16];
47 int signalg; 47 int signalg;
48 int sealalg; 48 int sealalg;
49 struct crypto_tfm *enc; 49 struct crypto_blkcipher *enc;
50 struct crypto_tfm *seq; 50 struct crypto_blkcipher *seq;
51 s32 endtime; 51 s32 endtime;
52 u32 seq_send; 52 u32 seq_send;
53 struct xdr_netobj mech_used; 53 struct xdr_netobj mech_used;
@@ -136,26 +136,27 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
136 136
137 137
138u32 138u32
139krb5_encrypt(struct crypto_tfm * key, 139krb5_encrypt(struct crypto_blkcipher *key,
140 void *iv, void *in, void *out, int length); 140 void *iv, void *in, void *out, int length);
141 141
142u32 142u32
143krb5_decrypt(struct crypto_tfm * key, 143krb5_decrypt(struct crypto_blkcipher *key,
144 void *iv, void *in, void *out, int length); 144 void *iv, void *in, void *out, int length);
145 145
146int 146int
147gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset, 147gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf,
148 struct page **pages); 148 int offset, struct page **pages);
149 149
150int 150int
151gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset); 151gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
152 int offset);
152 153
153s32 154s32
154krb5_make_seq_num(struct crypto_tfm * key, 155krb5_make_seq_num(struct crypto_blkcipher *key,
155 int direction, 156 int direction,
156 s32 seqnum, unsigned char *cksum, unsigned char *buf); 157 s32 seqnum, unsigned char *cksum, unsigned char *buf);
157 158
158s32 159s32
159krb5_get_seq_num(struct crypto_tfm * key, 160krb5_get_seq_num(struct crypto_blkcipher *key,
160 unsigned char *cksum, 161 unsigned char *cksum,
161 unsigned char *buf, int *direction, s32 * seqnum); 162 unsigned char *buf, int *direction, s32 * seqnum);
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h
index 336e218c2782..2cf3fbb40b4f 100644
--- a/include/linux/sunrpc/gss_spkm3.h
+++ b/include/linux/sunrpc/gss_spkm3.h
@@ -19,9 +19,9 @@ struct spkm3_ctx {
19 unsigned int req_flags ; 19 unsigned int req_flags ;
20 struct xdr_netobj share_key; 20 struct xdr_netobj share_key;
21 int conf_alg; 21 int conf_alg;
22 struct crypto_tfm* derived_conf_key; 22 struct crypto_blkcipher *derived_conf_key;
23 int intg_alg; 23 int intg_alg;
24 struct crypto_tfm* derived_integ_key; 24 struct crypto_blkcipher *derived_integ_key;
25 int keyestb_alg; /* alg used to get share_key */ 25 int keyestb_alg; /* alg used to get share_key */
26 int owf_alg; /* one way function */ 26 int owf_alg; /* one way function */
27}; 27};
diff --git a/include/net/ah.h b/include/net/ah.h
index ceff00afae09..8f257c159902 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -1,6 +1,7 @@
1#ifndef _NET_AH_H 1#ifndef _NET_AH_H
2#define _NET_AH_H 2#define _NET_AH_H
3 3
4#include <linux/crypto.h>
4#include <net/xfrm.h> 5#include <net/xfrm.h>
5 6
6/* This is the maximum truncated ICV length that we know of. */ 7/* This is the maximum truncated ICV length that we know of. */
@@ -14,22 +15,29 @@ struct ah_data
14 int icv_full_len; 15 int icv_full_len;
15 int icv_trunc_len; 16 int icv_trunc_len;
16 17
17 void (*icv)(struct ah_data*, 18 struct crypto_hash *tfm;
18 struct sk_buff *skb, u8 *icv);
19
20 struct crypto_tfm *tfm;
21}; 19};
22 20
23static inline void 21static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb,
24ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data) 22 u8 *auth_data)
25{ 23{
26 struct crypto_tfm *tfm = ahp->tfm; 24 struct hash_desc desc;
25 int err;
26
27 desc.tfm = ahp->tfm;
28 desc.flags = 0;
27 29
28 memset(auth_data, 0, ahp->icv_trunc_len); 30 memset(auth_data, 0, ahp->icv_trunc_len);
29 crypto_hmac_init(tfm, ahp->key, &ahp->key_len); 31 err = crypto_hash_init(&desc);
30 skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update); 32 if (unlikely(err))
31 crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv); 33 goto out;
32 memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len); 34 err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
35 if (unlikely(err))
36 goto out;
37 err = crypto_hash_final(&desc, ahp->work_icv);
38
39out:
40 return err;
33} 41}
34 42
35#endif 43#endif
diff --git a/include/net/esp.h b/include/net/esp.h
index 90cd94fad7d9..064366d66eea 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -1,6 +1,7 @@
1#ifndef _NET_ESP_H 1#ifndef _NET_ESP_H
2#define _NET_ESP_H 2#define _NET_ESP_H
3 3
4#include <linux/crypto.h>
4#include <net/xfrm.h> 5#include <net/xfrm.h>
5#include <asm/scatterlist.h> 6#include <asm/scatterlist.h>
6 7
@@ -21,7 +22,7 @@ struct esp_data
21 * >= crypto_tfm_alg_ivsize(tfm). */ 22 * >= crypto_tfm_alg_ivsize(tfm). */
22 int ivlen; 23 int ivlen;
23 int padlen; /* 0..255 */ 24 int padlen; /* 0..255 */
24 struct crypto_tfm *tfm; /* crypto handle */ 25 struct crypto_blkcipher *tfm; /* crypto handle */
25 } conf; 26 } conf;
26 27
27 /* Integrity. It is active when icv_full_len != 0 */ 28 /* Integrity. It is active when icv_full_len != 0 */
@@ -34,7 +35,7 @@ struct esp_data
34 void (*icv)(struct esp_data*, 35 void (*icv)(struct esp_data*,
35 struct sk_buff *skb, 36 struct sk_buff *skb,
36 int offset, int len, u8 *icv); 37 int offset, int len, u8 *icv);
37 struct crypto_tfm *tfm; 38 struct crypto_hash *tfm;
38 } auth; 39 } auth;
39}; 40};
40 41
@@ -42,18 +43,22 @@ extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
42extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 43extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
43extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); 44extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
44 45
45static inline void 46static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
46esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset, 47 int offset, int len)
47 int len, u8 *auth_data)
48{ 48{
49 struct crypto_tfm *tfm = esp->auth.tfm; 49 struct hash_desc desc;
50 char *icv = esp->auth.work_icv; 50 int err;
51 51
52 memset(auth_data, 0, esp->auth.icv_trunc_len); 52 desc.tfm = esp->auth.tfm;
53 crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len); 53 desc.flags = 0;
54 skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update); 54
55 crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv); 55 err = crypto_hash_init(&desc);
56 memcpy(auth_data, icv, esp->auth.icv_trunc_len); 56 if (unlikely(err))
57 return err;
58 err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update);
59 if (unlikely(err))
60 return err;
61 return crypto_hash_final(&desc, esp->auth.work_icv);
57} 62}
58 63
59#endif 64#endif
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index e651a57ecdd5..87c1af3e5e82 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -1,11 +1,14 @@
1#ifndef _NET_IPCOMP_H 1#ifndef _NET_IPCOMP_H
2#define _NET_IPCOMP_H 2#define _NET_IPCOMP_H
3 3
4#include <linux/crypto.h>
5#include <linux/types.h>
6
4#define IPCOMP_SCRATCH_SIZE 65400 7#define IPCOMP_SCRATCH_SIZE 65400
5 8
6struct ipcomp_data { 9struct ipcomp_data {
7 u16 threshold; 10 u16 threshold;
8 struct crypto_tfm **tfms; 11 struct crypto_comp **tfms;
9}; 12};
10 13
11#endif 14#endif
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index c51541ee0247..57166bfdf8eb 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -312,9 +312,9 @@ enum { SCTP_MAX_GABS = 16 };
312 */ 312 */
313 313
314#if defined (CONFIG_SCTP_HMAC_MD5) 314#if defined (CONFIG_SCTP_HMAC_MD5)
315#define SCTP_COOKIE_HMAC_ALG "md5" 315#define SCTP_COOKIE_HMAC_ALG "hmac(md5)"
316#elif defined (CONFIG_SCTP_HMAC_SHA1) 316#elif defined (CONFIG_SCTP_HMAC_SHA1)
317#define SCTP_COOKIE_HMAC_ALG "sha1" 317#define SCTP_COOKIE_HMAC_ALG "hmac(sha1)"
318#else 318#else
319#define SCTP_COOKIE_HMAC_ALG NULL 319#define SCTP_COOKIE_HMAC_ALG NULL
320#endif 320#endif
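/*
 * Usage sketch (not part of the patch): with the template-based names
 * above, the SCTP cookie HMAC is allocated through the hash interface
 * roughly as net/sctp/socket.c now does (error handling abridged):
 */
struct crypto_hash *hmac;

if (!SCTP_COOKIE_HMAC_ALG)
	return 0;		/* no cookie authentication configured */

hmac = crypto_alloc_hash(SCTP_COOKIE_HMAC_ALG, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac))
	return -ENOSYS;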
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 92eae0e0f3f1..1c1abce5f6b6 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -330,17 +330,6 @@ static inline void sctp_v6_exit(void) { return; }
330 330
331#endif /* #if defined(CONFIG_IPV6) */ 331#endif /* #if defined(CONFIG_IPV6) */
332 332
333/* Some wrappers, in case crypto not available. */
334#if defined (CONFIG_CRYPTO_HMAC)
335#define sctp_crypto_alloc_tfm crypto_alloc_tfm
336#define sctp_crypto_free_tfm crypto_free_tfm
337#define sctp_crypto_hmac crypto_hmac
338#else
339#define sctp_crypto_alloc_tfm(x...) NULL
340#define sctp_crypto_free_tfm(x...)
341#define sctp_crypto_hmac(x...)
342#endif
343
344 333
345/* Map an association to an assoc_id. */ 334/* Map an association to an assoc_id. */
346static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) 335static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e5aa7ff1f5b5..0412e730c765 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -87,6 +87,7 @@ struct sctp_bind_addr;
87struct sctp_ulpq; 87struct sctp_ulpq;
88struct sctp_ep_common; 88struct sctp_ep_common;
89struct sctp_ssnmap; 89struct sctp_ssnmap;
90struct crypto_hash;
90 91
91 92
92#include <net/sctp/tsnmap.h> 93#include <net/sctp/tsnmap.h>
@@ -264,7 +265,7 @@ struct sctp_sock {
264 struct sctp_pf *pf; 265 struct sctp_pf *pf;
265 266
266 /* Access to HMAC transform. */ 267 /* Access to HMAC transform. */
267 struct crypto_tfm *hmac; 268 struct crypto_hash *hmac;
268 269
269 /* What is our base endpointer? */ 270 /* What is our base endpointer? */
270 struct sctp_endpoint *ep; 271 struct sctp_endpoint *ep;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 9c5ee9f20b65..3ecd9fa1ed4b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -8,7 +8,6 @@
8#include <linux/list.h> 8#include <linux/list.h>
9#include <linux/skbuff.h> 9#include <linux/skbuff.h>
10#include <linux/socket.h> 10#include <linux/socket.h>
11#include <linux/crypto.h>
12#include <linux/pfkeyv2.h> 11#include <linux/pfkeyv2.h>
13#include <linux/in6.h> 12#include <linux/in6.h>
14#include <linux/mutex.h> 13#include <linux/mutex.h>
@@ -855,6 +854,7 @@ struct xfrm_algo_comp_info {
855 854
856struct xfrm_algo_desc { 855struct xfrm_algo_desc {
857 char *name; 856 char *name;
857 char *compat;
858 u8 available:1; 858 u8 available:1;
859 union { 859 union {
860 struct xfrm_algo_auth_info auth; 860 struct xfrm_algo_auth_info auth;
@@ -984,11 +984,13 @@ extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
984extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe); 984extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
985extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe); 985extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
986 986
987struct crypto_tfm; 987struct hash_desc;
988typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int); 988struct scatterlist;
989typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
990 unsigned int);
989 991
990extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, 992extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
991 int offset, int len, icv_update_fn_t icv_update); 993 int offset, int len, icv_update_fn_t icv_update);
992 994
993static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, 995static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
994 int family) 996 int family)
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index ed90a8af1444..fdfe7704a469 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -9,6 +9,7 @@
9 * more details. 9 * more details.
10 */ 10 */
11 11
12#include <linux/err.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -48,7 +49,7 @@ struct ieee80211_ccmp_data {
48 49
49 int key_idx; 50 int key_idx;
50 51
51 struct crypto_tfm *tfm; 52 struct crypto_cipher *tfm;
52 53
53 /* scratch buffers for virt_to_page() (crypto API) */ 54 /* scratch buffers for virt_to_page() (crypto API) */
54 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], 55 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
@@ -56,20 +57,10 @@ struct ieee80211_ccmp_data {
56 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; 57 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
57}; 58};
58 59
59static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, 60static inline void ieee80211_ccmp_aes_encrypt(struct crypto_cipher *tfm,
60 const u8 pt[16], u8 ct[16]) 61 const u8 pt[16], u8 ct[16])
61{ 62{
62 struct scatterlist src, dst; 63 crypto_cipher_encrypt_one(tfm, ct, pt);
63
64 src.page = virt_to_page(pt);
65 src.offset = offset_in_page(pt);
66 src.length = AES_BLOCK_LEN;
67
68 dst.page = virt_to_page(ct);
69 dst.offset = offset_in_page(ct);
70 dst.length = AES_BLOCK_LEN;
71
72 crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
73} 64}
74 65
75static void *ieee80211_ccmp_init(int key_idx) 66static void *ieee80211_ccmp_init(int key_idx)
@@ -81,10 +72,11 @@ static void *ieee80211_ccmp_init(int key_idx)
81 goto fail; 72 goto fail;
82 priv->key_idx = key_idx; 73 priv->key_idx = key_idx;
83 74
84 priv->tfm = crypto_alloc_tfm("aes", 0); 75 priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
85 if (priv->tfm == NULL) { 76 if (IS_ERR(priv->tfm)) {
86 printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate " 77 printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
87 "crypto API aes\n"); 78 "crypto API aes\n");
79 priv->tfm = NULL;
88 goto fail; 80 goto fail;
89 } 81 }
90 82
@@ -93,7 +85,7 @@ static void *ieee80211_ccmp_init(int key_idx)
93 fail: 85 fail:
94 if (priv) { 86 if (priv) {
95 if (priv->tfm) 87 if (priv->tfm)
96 crypto_free_tfm(priv->tfm); 88 crypto_free_cipher(priv->tfm);
97 kfree(priv); 89 kfree(priv);
98 } 90 }
99 91
@@ -104,7 +96,7 @@ static void ieee80211_ccmp_deinit(void *priv)
104{ 96{
105 struct ieee80211_ccmp_data *_priv = priv; 97 struct ieee80211_ccmp_data *_priv = priv;
106 if (_priv && _priv->tfm) 98 if (_priv && _priv->tfm)
107 crypto_free_tfm(_priv->tfm); 99 crypto_free_cipher(_priv->tfm);
108 kfree(priv); 100 kfree(priv);
109} 101}
110 102
@@ -115,7 +107,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len)
115 b[i] ^= a[i]; 107 b[i] ^= a[i];
116} 108}
117 109
118static void ccmp_init_blocks(struct crypto_tfm *tfm, 110static void ccmp_init_blocks(struct crypto_cipher *tfm,
119 struct ieee80211_hdr_4addr *hdr, 111 struct ieee80211_hdr_4addr *hdr,
120 u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) 112 u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0)
121{ 113{
@@ -377,7 +369,7 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv)
377{ 369{
378 struct ieee80211_ccmp_data *data = priv; 370 struct ieee80211_ccmp_data *data = priv;
379 int keyidx; 371 int keyidx;
380 struct crypto_tfm *tfm = data->tfm; 372 struct crypto_cipher *tfm = data->tfm;
381 373
382 keyidx = data->key_idx; 374 keyidx = data->key_idx;
383 memset(data, 0, sizeof(*data)); 375 memset(data, 0, sizeof(*data));
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 34dba0ba545d..407a17495b61 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -9,6 +9,7 @@
9 * more details. 9 * more details.
10 */ 10 */
11 11
12#include <linux/err.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -52,8 +53,8 @@ struct ieee80211_tkip_data {
52 53
53 int key_idx; 54 int key_idx;
54 55
55 struct crypto_tfm *tfm_arc4; 56 struct crypto_blkcipher *tfm_arc4;
56 struct crypto_tfm *tfm_michael; 57 struct crypto_hash *tfm_michael;
57 58
58 /* scratch buffers for virt_to_page() (crypto API) */ 59 /* scratch buffers for virt_to_page() (crypto API) */
59 u8 rx_hdr[16], tx_hdr[16]; 60 u8 rx_hdr[16], tx_hdr[16];
@@ -85,17 +86,21 @@ static void *ieee80211_tkip_init(int key_idx)
85 86
86 priv->key_idx = key_idx; 87 priv->key_idx = key_idx;
87 88
88 priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); 89 priv->tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
89 if (priv->tfm_arc4 == NULL) { 90 CRYPTO_ALG_ASYNC);
91 if (IS_ERR(priv->tfm_arc4)) {
90 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " 92 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
91 "crypto API arc4\n"); 93 "crypto API arc4\n");
94 priv->tfm_arc4 = NULL;
92 goto fail; 95 goto fail;
93 } 96 }
94 97
95 priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0); 98 priv->tfm_michael = crypto_alloc_hash("michael_mic", 0,
96 if (priv->tfm_michael == NULL) { 99 CRYPTO_ALG_ASYNC);
100 if (IS_ERR(priv->tfm_michael)) {
97 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " 101 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
98 "crypto API michael_mic\n"); 102 "crypto API michael_mic\n");
103 priv->tfm_michael = NULL;
99 goto fail; 104 goto fail;
100 } 105 }
101 106
@@ -104,9 +109,9 @@ static void *ieee80211_tkip_init(int key_idx)
104 fail: 109 fail:
105 if (priv) { 110 if (priv) {
106 if (priv->tfm_michael) 111 if (priv->tfm_michael)
107 crypto_free_tfm(priv->tfm_michael); 112 crypto_free_hash(priv->tfm_michael);
108 if (priv->tfm_arc4) 113 if (priv->tfm_arc4)
109 crypto_free_tfm(priv->tfm_arc4); 114 crypto_free_blkcipher(priv->tfm_arc4);
110 kfree(priv); 115 kfree(priv);
111 } 116 }
112 117
@@ -117,9 +122,9 @@ static void ieee80211_tkip_deinit(void *priv)
117{ 122{
118 struct ieee80211_tkip_data *_priv = priv; 123 struct ieee80211_tkip_data *_priv = priv;
119 if (_priv && _priv->tfm_michael) 124 if (_priv && _priv->tfm_michael)
120 crypto_free_tfm(_priv->tfm_michael); 125 crypto_free_hash(_priv->tfm_michael);
121 if (_priv && _priv->tfm_arc4) 126 if (_priv && _priv->tfm_arc4)
122 crypto_free_tfm(_priv->tfm_arc4); 127 crypto_free_blkcipher(_priv->tfm_arc4);
123 kfree(priv); 128 kfree(priv);
124} 129}
125 130
@@ -318,6 +323,7 @@ static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
318static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 323static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
319{ 324{
320 struct ieee80211_tkip_data *tkey = priv; 325 struct ieee80211_tkip_data *tkey = priv;
326 struct blkcipher_desc desc = { .tfm = tkey->tfm_arc4 };
321 int len; 327 int len;
322 u8 rc4key[16], *pos, *icv; 328 u8 rc4key[16], *pos, *icv;
323 u32 crc; 329 u32 crc;
@@ -351,18 +357,17 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
351 icv[2] = crc >> 16; 357 icv[2] = crc >> 16;
352 icv[3] = crc >> 24; 358 icv[3] = crc >> 24;
353 359
354 crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); 360 crypto_blkcipher_setkey(tkey->tfm_arc4, rc4key, 16);
355 sg.page = virt_to_page(pos); 361 sg.page = virt_to_page(pos);
356 sg.offset = offset_in_page(pos); 362 sg.offset = offset_in_page(pos);
357 sg.length = len + 4; 363 sg.length = len + 4;
358 crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); 364 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
359
360 return 0;
361} 365}
362 366
363static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) 367static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
364{ 368{
365 struct ieee80211_tkip_data *tkey = priv; 369 struct ieee80211_tkip_data *tkey = priv;
370 struct blkcipher_desc desc = { .tfm = tkey->tfm_arc4 };
366 u8 rc4key[16]; 371 u8 rc4key[16];
367 u8 keyidx, *pos; 372 u8 keyidx, *pos;
368 u32 iv32; 373 u32 iv32;
@@ -434,11 +439,18 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
434 439
435 plen = skb->len - hdr_len - 12; 440 plen = skb->len - hdr_len - 12;
436 441
437 crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); 442 crypto_blkcipher_setkey(tkey->tfm_arc4, rc4key, 16);
438 sg.page = virt_to_page(pos); 443 sg.page = virt_to_page(pos);
439 sg.offset = offset_in_page(pos); 444 sg.offset = offset_in_page(pos);
440 sg.length = plen + 4; 445 sg.length = plen + 4;
441 crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4); 446 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
447 if (net_ratelimit()) {
448 printk(KERN_DEBUG ": TKIP: failed to decrypt "
449 "received packet from " MAC_FMT "\n",
450 MAC_ARG(hdr->addr2));
451 }
452 return -7;
453 }
442 454
443 crc = ~crc32_le(~0, pos, plen); 455 crc = ~crc32_le(~0, pos, plen);
444 icv[0] = crc; 456 icv[0] = crc;
@@ -475,6 +487,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
475static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, 487static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
476 u8 * data, size_t data_len, u8 * mic) 488 u8 * data, size_t data_len, u8 * mic)
477{ 489{
490 struct hash_desc desc;
478 struct scatterlist sg[2]; 491 struct scatterlist sg[2];
479 492
480 if (tkey->tfm_michael == NULL) { 493 if (tkey->tfm_michael == NULL) {
@@ -489,12 +502,12 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
489 sg[1].offset = offset_in_page(data); 502 sg[1].offset = offset_in_page(data);
490 sg[1].length = data_len; 503 sg[1].length = data_len;
491 504
492 crypto_digest_init(tkey->tfm_michael); 505 if (crypto_hash_setkey(tkey->tfm_michael, key, 8))
493 crypto_digest_setkey(tkey->tfm_michael, key, 8); 506 return -1;
494 crypto_digest_update(tkey->tfm_michael, sg, 2);
495 crypto_digest_final(tkey->tfm_michael, mic);
496 507
497 return 0; 508 desc.tfm = tkey->tfm_michael;
509 desc.flags = 0;
510 return crypto_hash_digest(&desc, sg, data_len + 16, mic);
498} 511}
499 512
500static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) 513static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
@@ -618,8 +631,8 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
618{ 631{
619 struct ieee80211_tkip_data *tkey = priv; 632 struct ieee80211_tkip_data *tkey = priv;
620 int keyidx; 633 int keyidx;
621 struct crypto_tfm *tfm = tkey->tfm_michael; 634 struct crypto_hash *tfm = tkey->tfm_michael;
622 struct crypto_tfm *tfm2 = tkey->tfm_arc4; 635 struct crypto_blkcipher *tfm2 = tkey->tfm_arc4;
623 636
624 keyidx = tkey->key_idx; 637 keyidx = tkey->key_idx;
625 memset(tkey, 0, sizeof(*tkey)); 638 memset(tkey, 0, sizeof(*tkey));
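The TKIP hunks above show the shape of the whole conversion: the untyped struct crypto_tfm splits into typed handles (crypto_hash for the Michael MIC, crypto_blkcipher for ARC4), and each blkcipher operation now takes an on-stack struct blkcipher_desc rather than acting on the tfm directly, so encrypt/decrypt return an error the caller can propagate (hence the direct return of crypto_blkcipher_encrypt() and the new -7 path on decrypt failure). A minimal sketch of the new calling convention, assuming the 2.6.19-era API used throughout this series; the helper name and the one-shot alloc/free are illustrative only:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper: RC4 a linear buffer in place. */
    static int rc4_crypt_buf(const u8 *key, u8 *buf, unsigned int len)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int err;

            /* "ecb(arc4)" wraps the arc4 stream cipher in the ecb
             * template so it is reachable through the blkcipher API. */
            tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_blkcipher_setkey(tfm, key, 16);
            if (!err) {
                    desc.tfm = tfm;
                    desc.flags = 0;
                    sg_set_buf(&sg, buf, len);
                    err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
            }

            crypto_free_blkcipher(tfm);
            return err;
    }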
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 0ebf235f6939..3d46d3efe1dd 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -9,6 +9,7 @@
9 * more details. 9 * more details.
10 */ 10 */
11 11
12#include <linux/err.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -32,7 +33,7 @@ struct prism2_wep_data {
32 u8 key[WEP_KEY_LEN + 1]; 33 u8 key[WEP_KEY_LEN + 1];
33 u8 key_len; 34 u8 key_len;
34 u8 key_idx; 35 u8 key_idx;
35 struct crypto_tfm *tfm; 36 struct crypto_blkcipher *tfm;
36}; 37};
37 38
38static void *prism2_wep_init(int keyidx) 39static void *prism2_wep_init(int keyidx)
@@ -44,10 +45,11 @@ static void *prism2_wep_init(int keyidx)
44 goto fail; 45 goto fail;
45 priv->key_idx = keyidx; 46 priv->key_idx = keyidx;
46 47
47 priv->tfm = crypto_alloc_tfm("arc4", 0); 48 priv->tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
48 if (priv->tfm == NULL) { 49 if (IS_ERR(priv->tfm)) {
49 printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " 50 printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
50 "crypto API arc4\n"); 51 "crypto API arc4\n");
52 priv->tfm = NULL;
51 goto fail; 53 goto fail;
52 } 54 }
53 55
@@ -59,7 +61,7 @@ static void *prism2_wep_init(int keyidx)
59 fail: 61 fail:
60 if (priv) { 62 if (priv) {
61 if (priv->tfm) 63 if (priv->tfm)
62 crypto_free_tfm(priv->tfm); 64 crypto_free_blkcipher(priv->tfm);
63 kfree(priv); 65 kfree(priv);
64 } 66 }
65 return NULL; 67 return NULL;
@@ -69,7 +71,7 @@ static void prism2_wep_deinit(void *priv)
69{ 71{
70 struct prism2_wep_data *_priv = priv; 72 struct prism2_wep_data *_priv = priv;
71 if (_priv && _priv->tfm) 73 if (_priv && _priv->tfm)
72 crypto_free_tfm(_priv->tfm); 74 crypto_free_blkcipher(_priv->tfm);
73 kfree(priv); 75 kfree(priv);
74} 76}
75 77
@@ -120,6 +122,7 @@ static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len,
120static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 122static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
121{ 123{
122 struct prism2_wep_data *wep = priv; 124 struct prism2_wep_data *wep = priv;
125 struct blkcipher_desc desc = { .tfm = wep->tfm };
123 u32 crc, klen, len; 126 u32 crc, klen, len;
124 u8 *pos, *icv; 127 u8 *pos, *icv;
125 struct scatterlist sg; 128 struct scatterlist sg;
@@ -151,13 +154,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
151 icv[2] = crc >> 16; 154 icv[2] = crc >> 16;
152 icv[3] = crc >> 24; 155 icv[3] = crc >> 24;
153 156
154 crypto_cipher_setkey(wep->tfm, key, klen); 157 crypto_blkcipher_setkey(wep->tfm, key, klen);
155 sg.page = virt_to_page(pos); 158 sg.page = virt_to_page(pos);
156 sg.offset = offset_in_page(pos); 159 sg.offset = offset_in_page(pos);
157 sg.length = len + 4; 160 sg.length = len + 4;
158 crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); 161 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
159
160 return 0;
161} 162}
162 163
163/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of 164/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@@ -170,6 +171,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
170static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) 171static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
171{ 172{
172 struct prism2_wep_data *wep = priv; 173 struct prism2_wep_data *wep = priv;
174 struct blkcipher_desc desc = { .tfm = wep->tfm };
173 u32 crc, klen, plen; 175 u32 crc, klen, plen;
174 u8 key[WEP_KEY_LEN + 3]; 176 u8 key[WEP_KEY_LEN + 3];
175 u8 keyidx, *pos, icv[4]; 177 u8 keyidx, *pos, icv[4];
@@ -194,11 +196,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
194 /* Apply RC4 to data and compute CRC32 over decrypted data */ 196 /* Apply RC4 to data and compute CRC32 over decrypted data */
195 plen = skb->len - hdr_len - 8; 197 plen = skb->len - hdr_len - 8;
196 198
197 crypto_cipher_setkey(wep->tfm, key, klen); 199 crypto_blkcipher_setkey(wep->tfm, key, klen);
198 sg.page = virt_to_page(pos); 200 sg.page = virt_to_page(pos);
199 sg.offset = offset_in_page(pos); 201 sg.offset = offset_in_page(pos);
200 sg.length = plen + 4; 202 sg.length = plen + 4;
201 crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4); 203 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
204 return -7;
202 205
203 crc = ~crc32_le(~0, pos, plen); 206 crc = ~crc32_le(~0, pos, plen);
204 icv[0] = crc; 207 icv[0] = crc;
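Note the allocation change in prism2_wep_init(): the new crypto_alloc_blkcipher() reports failure through ERR_PTR() rather than by returning NULL, which is why the error value is overwritten with NULL before jumping to the shared fail path (that path frees only non-NULL handles). The same convention appears to hold for all crypto_alloc_* calls in this series:

    tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);    /* e.g. -ENOENT if arc4/ecb are not built */

As in TKIP, prism2_wep_encrypt()/decrypt() now return the cipher's own error code instead of an unconditional 0.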
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8514106761b0..3b5d504a74be 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -386,6 +386,7 @@ config INET_ESP
386 select CRYPTO 386 select CRYPTO
387 select CRYPTO_HMAC 387 select CRYPTO_HMAC
388 select CRYPTO_MD5 388 select CRYPTO_MD5
389 select CRYPTO_CBC
389 select CRYPTO_SHA1 390 select CRYPTO_SHA1
390 select CRYPTO_DES 391 select CRYPTO_DES
391 ---help--- 392 ---help---
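INET_ESP gains a select on CRYPTO_CBC because chaining modes are now standalone templates: ESP no longer requests CRYPTO_TFM_MODE_CBC from the cipher layer (see the esp4.c hunks below) but instantiates the mode by name, so the cbc template must be built. Presumably the xfrm algorithm tables updated elsewhere in this series carry the composed names, along the lines of:

    /* Assumed template-style names now handed to the allocator: */
    tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);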
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 1366bc6ce6a5..2b98943e6b02 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,4 @@
1#include <linux/err.h>
1#include <linux/module.h> 2#include <linux/module.h>
2#include <net/ip.h> 3#include <net/ip.h>
3#include <net/xfrm.h> 4#include <net/xfrm.h>
@@ -97,7 +98,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
97 ah->spi = x->id.spi; 98 ah->spi = x->id.spi;
98 ah->seq_no = htonl(++x->replay.oseq); 99 ah->seq_no = htonl(++x->replay.oseq);
99 xfrm_aevent_doreplay(x); 100 xfrm_aevent_doreplay(x);
100 ahp->icv(ahp, skb, ah->auth_data); 101 err = ah_mac_digest(ahp, skb, ah->auth_data);
102 if (err)
103 goto error;
104 memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
101 105
102 top_iph->tos = iph->tos; 106 top_iph->tos = iph->tos;
103 top_iph->ttl = iph->ttl; 107 top_iph->ttl = iph->ttl;
@@ -119,6 +123,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
119{ 123{
120 int ah_hlen; 124 int ah_hlen;
121 int ihl; 125 int ihl;
126 int err = -EINVAL;
122 struct iphdr *iph; 127 struct iphdr *iph;
123 struct ip_auth_hdr *ah; 128 struct ip_auth_hdr *ah;
124 struct ah_data *ahp; 129 struct ah_data *ahp;
@@ -166,8 +171,11 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
166 171
167 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 172 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
168 skb_push(skb, ihl); 173 skb_push(skb, ihl);
169 ahp->icv(ahp, skb, ah->auth_data); 174 err = ah_mac_digest(ahp, skb, ah->auth_data);
170 if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 175 if (err)
176 goto out;
177 err = -EINVAL;
178 if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
171 x->stats.integrity_failed++; 179 x->stats.integrity_failed++;
172 goto out; 180 goto out;
173 } 181 }
@@ -179,7 +187,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
179 return 0; 187 return 0;
180 188
181out: 189out:
182 return -EINVAL; 190 return err;
183} 191}
184 192
185static void ah4_err(struct sk_buff *skb, u32 info) 193static void ah4_err(struct sk_buff *skb, u32 info)
@@ -204,6 +212,7 @@ static int ah_init_state(struct xfrm_state *x)
204{ 212{
205 struct ah_data *ahp = NULL; 213 struct ah_data *ahp = NULL;
206 struct xfrm_algo_desc *aalg_desc; 214 struct xfrm_algo_desc *aalg_desc;
215 struct crypto_hash *tfm;
207 216
208 if (!x->aalg) 217 if (!x->aalg)
209 goto error; 218 goto error;
@@ -221,24 +230,27 @@ static int ah_init_state(struct xfrm_state *x)
221 230
222 ahp->key = x->aalg->alg_key; 231 ahp->key = x->aalg->alg_key;
223 ahp->key_len = (x->aalg->alg_key_len+7)/8; 232 ahp->key_len = (x->aalg->alg_key_len+7)/8;
224 ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 233 tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
225 if (!ahp->tfm) 234 if (IS_ERR(tfm))
235 goto error;
236
237 ahp->tfm = tfm;
238 if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
226 goto error; 239 goto error;
227 ahp->icv = ah_hmac_digest;
228 240
229 /* 241 /*
230 * Lookup the algorithm description maintained by xfrm_algo, 242 * Lookup the algorithm description maintained by xfrm_algo,
231 * verify crypto transform properties, and store information 243 * verify crypto transform properties, and store information
232 * we need for AH processing. This lookup cannot fail here 244 * we need for AH processing. This lookup cannot fail here
233 * after a successful crypto_alloc_tfm(). 245 * after a successful crypto_alloc_hash().
234 */ 246 */
235 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 247 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
236 BUG_ON(!aalg_desc); 248 BUG_ON(!aalg_desc);
237 249
238 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 250 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
239 crypto_tfm_alg_digestsize(ahp->tfm)) { 251 crypto_hash_digestsize(tfm)) {
240 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 252 printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
241 x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), 253 x->aalg->alg_name, crypto_hash_digestsize(tfm),
242 aalg_desc->uinfo.auth.icv_fullbits/8); 254 aalg_desc->uinfo.auth.icv_fullbits/8);
243 goto error; 255 goto error;
244 } 256 }
@@ -262,7 +274,7 @@ static int ah_init_state(struct xfrm_state *x)
262error: 274error:
263 if (ahp) { 275 if (ahp) {
264 kfree(ahp->work_icv); 276 kfree(ahp->work_icv);
265 crypto_free_tfm(ahp->tfm); 277 crypto_free_hash(ahp->tfm);
266 kfree(ahp); 278 kfree(ahp);
267 } 279 }
268 return -EINVAL; 280 return -EINVAL;
@@ -277,7 +289,7 @@ static void ah_destroy(struct xfrm_state *x)
277 289
278 kfree(ahp->work_icv); 290 kfree(ahp->work_icv);
279 ahp->work_icv = NULL; 291 ahp->work_icv = NULL;
280 crypto_free_tfm(ahp->tfm); 292 crypto_free_hash(ahp->tfm);
281 ahp->tfm = NULL; 293 ahp->tfm = NULL;
282 kfree(ahp); 294 kfree(ahp);
283} 295}
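ah4.c drops the ahp->icv function pointer (previously ah_hmac_digest) in favour of ah_mac_digest(), which computes the full digest into ahp->work_icv through the new hash interface; callers then copy or compare the truncated ICV, and a digest failure now propagates as an error instead of being ignored. A simplified sketch of the underlying pattern over a flat buffer (the real helper, added elsewhere in this series, walks the skb):

    /* Hypothetical flat-buffer analogue of ah_mac_digest(). */
    static int hmac_icv(struct crypto_hash *tfm,
                        const u8 *key, unsigned int klen,
                        struct scatterlist *sg, unsigned int nbytes,
                        u8 *out)
    {
            struct hash_desc desc = { .tfm = tfm, .flags = 0 };
            int err;

            /* HMAC is now the "hmac(md5)"/"hmac(sha1)" template behind
             * an ordinary crypto_hash handle, keyed via setkey(). */
            err = crypto_hash_setkey(tfm, key, klen);
            if (err)
                    return err;
            return crypto_hash_digest(&desc, sg, nbytes, out);
    }

Setting the key once at ah_init_state() time, as the hunk above does, also removes the per-packet rekeying the old digest helper had to perform.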
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index fc2f8ce441de..b428489f6ccd 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,4 @@
1#include <linux/err.h>
1#include <linux/module.h> 2#include <linux/module.h>
2#include <net/ip.h> 3#include <net/ip.h>
3#include <net/xfrm.h> 4#include <net/xfrm.h>
@@ -16,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
16 int err; 17 int err;
17 struct iphdr *top_iph; 18 struct iphdr *top_iph;
18 struct ip_esp_hdr *esph; 19 struct ip_esp_hdr *esph;
19 struct crypto_tfm *tfm; 20 struct crypto_blkcipher *tfm;
21 struct blkcipher_desc desc;
20 struct esp_data *esp; 22 struct esp_data *esp;
21 struct sk_buff *trailer; 23 struct sk_buff *trailer;
22 int blksize; 24 int blksize;
@@ -36,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
36 esp = x->data; 38 esp = x->data;
37 alen = esp->auth.icv_trunc_len; 39 alen = esp->auth.icv_trunc_len;
38 tfm = esp->conf.tfm; 40 tfm = esp->conf.tfm;
39 blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); 41 desc.tfm = tfm;
42 desc.flags = 0;
43 blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
40 clen = ALIGN(clen + 2, blksize); 44 clen = ALIGN(clen + 2, blksize);
41 if (esp->conf.padlen) 45 if (esp->conf.padlen)
42 clen = ALIGN(clen, esp->conf.padlen); 46 clen = ALIGN(clen, esp->conf.padlen);
@@ -92,7 +96,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
92 xfrm_aevent_doreplay(x); 96 xfrm_aevent_doreplay(x);
93 97
94 if (esp->conf.ivlen) 98 if (esp->conf.ivlen)
95 crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 99 crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
96 100
97 do { 101 do {
98 struct scatterlist *sg = &esp->sgbuf[0]; 102 struct scatterlist *sg = &esp->sgbuf[0];
@@ -103,26 +107,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
103 goto error; 107 goto error;
104 } 108 }
105 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); 109 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
106 crypto_cipher_encrypt(tfm, sg, sg, clen); 110 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
107 if (unlikely(sg != &esp->sgbuf[0])) 111 if (unlikely(sg != &esp->sgbuf[0]))
108 kfree(sg); 112 kfree(sg);
109 } while (0); 113 } while (0);
110 114
115 if (unlikely(err))
116 goto error;
117
111 if (esp->conf.ivlen) { 118 if (esp->conf.ivlen) {
112 memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 119 memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
113 crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 120 crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
114 } 121 }
115 122
116 if (esp->auth.icv_full_len) { 123 if (esp->auth.icv_full_len) {
117 esp->auth.icv(esp, skb, (u8*)esph-skb->data, 124 err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
118 sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); 125 sizeof(*esph) + esp->conf.ivlen + clen);
119 pskb_put(skb, trailer, alen); 126 memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
120 } 127 }
121 128
122 ip_send_check(top_iph); 129 ip_send_check(top_iph);
123 130
124 err = 0;
125
126error: 131error:
127 return err; 132 return err;
128} 133}
@@ -137,8 +142,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
137 struct iphdr *iph; 142 struct iphdr *iph;
138 struct ip_esp_hdr *esph; 143 struct ip_esp_hdr *esph;
139 struct esp_data *esp = x->data; 144 struct esp_data *esp = x->data;
145 struct crypto_blkcipher *tfm = esp->conf.tfm;
146 struct blkcipher_desc desc = { .tfm = tfm };
140 struct sk_buff *trailer; 147 struct sk_buff *trailer;
141 int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 148 int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
142 int alen = esp->auth.icv_trunc_len; 149 int alen = esp->auth.icv_trunc_len;
143 int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; 150 int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
144 int nfrags; 151 int nfrags;
@@ -146,6 +153,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
146 u8 nexthdr[2]; 153 u8 nexthdr[2];
147 struct scatterlist *sg; 154 struct scatterlist *sg;
148 int padlen; 155 int padlen;
156 int err;
149 157
150 if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr))) 158 if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
151 goto out; 159 goto out;
@@ -155,15 +163,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
155 163
156 /* If integrity check is required, do this. */ 164 /* If integrity check is required, do this. */
157 if (esp->auth.icv_full_len) { 165 if (esp->auth.icv_full_len) {
158 u8 sum[esp->auth.icv_full_len]; 166 u8 sum[alen];
159 u8 sum1[alen];
160
161 esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
162 167
163 if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) 168 err = esp_mac_digest(esp, skb, 0, skb->len - alen);
169 if (err)
170 goto out;
171
172 if (skb_copy_bits(skb, skb->len - alen, sum, alen))
164 BUG(); 173 BUG();
165 174
166 if (unlikely(memcmp(sum, sum1, alen))) { 175 if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
167 x->stats.integrity_failed++; 176 x->stats.integrity_failed++;
168 goto out; 177 goto out;
169 } 178 }
@@ -178,7 +187,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
178 187
179 /* Get ivec. This can be wrong, check against another impls. */ 188 /* Get ivec. This can be wrong, check against another impls. */
180 if (esp->conf.ivlen) 189 if (esp->conf.ivlen)
181 crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); 190 crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
182 191
183 sg = &esp->sgbuf[0]; 192 sg = &esp->sgbuf[0];
184 193
@@ -188,9 +197,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
188 goto out; 197 goto out;
189 } 198 }
190 skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen); 199 skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
191 crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); 200 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
192 if (unlikely(sg != &esp->sgbuf[0])) 201 if (unlikely(sg != &esp->sgbuf[0]))
193 kfree(sg); 202 kfree(sg);
203 if (unlikely(err))
204 return err;
194 205
195 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) 206 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
196 BUG(); 207 BUG();
@@ -254,7 +265,7 @@ out:
254static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) 265static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
255{ 266{
256 struct esp_data *esp = x->data; 267 struct esp_data *esp = x->data;
257 u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 268 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
258 269
259 if (x->props.mode) { 270 if (x->props.mode) {
260 mtu = ALIGN(mtu + 2, blksize); 271 mtu = ALIGN(mtu + 2, blksize);
@@ -293,11 +304,11 @@ static void esp_destroy(struct xfrm_state *x)
293 if (!esp) 304 if (!esp)
294 return; 305 return;
295 306
296 crypto_free_tfm(esp->conf.tfm); 307 crypto_free_blkcipher(esp->conf.tfm);
297 esp->conf.tfm = NULL; 308 esp->conf.tfm = NULL;
298 kfree(esp->conf.ivec); 309 kfree(esp->conf.ivec);
299 esp->conf.ivec = NULL; 310 esp->conf.ivec = NULL;
300 crypto_free_tfm(esp->auth.tfm); 311 crypto_free_hash(esp->auth.tfm);
301 esp->auth.tfm = NULL; 312 esp->auth.tfm = NULL;
302 kfree(esp->auth.work_icv); 313 kfree(esp->auth.work_icv);
303 esp->auth.work_icv = NULL; 314 esp->auth.work_icv = NULL;
@@ -307,6 +318,7 @@ static void esp_destroy(struct xfrm_state *x)
307static int esp_init_state(struct xfrm_state *x) 318static int esp_init_state(struct xfrm_state *x)
308{ 319{
309 struct esp_data *esp = NULL; 320 struct esp_data *esp = NULL;
321 struct crypto_blkcipher *tfm;
310 322
311 /* null auth and encryption can have zero length keys */ 323 /* null auth and encryption can have zero length keys */
312 if (x->aalg) { 324 if (x->aalg) {
@@ -322,22 +334,27 @@ static int esp_init_state(struct xfrm_state *x)
322 334
323 if (x->aalg) { 335 if (x->aalg) {
324 struct xfrm_algo_desc *aalg_desc; 336 struct xfrm_algo_desc *aalg_desc;
337 struct crypto_hash *hash;
325 338
326 esp->auth.key = x->aalg->alg_key; 339 esp->auth.key = x->aalg->alg_key;
327 esp->auth.key_len = (x->aalg->alg_key_len+7)/8; 340 esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
328 esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 341 hash = crypto_alloc_hash(x->aalg->alg_name, 0,
329 if (esp->auth.tfm == NULL) 342 CRYPTO_ALG_ASYNC);
343 if (IS_ERR(hash))
344 goto error;
345
346 esp->auth.tfm = hash;
347 if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
330 goto error; 348 goto error;
331 esp->auth.icv = esp_hmac_digest;
332 349
333 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 350 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
334 BUG_ON(!aalg_desc); 351 BUG_ON(!aalg_desc);
335 352
336 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 353 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
337 crypto_tfm_alg_digestsize(esp->auth.tfm)) { 354 crypto_hash_digestsize(hash)) {
338 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 355 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
339 x->aalg->alg_name, 356 x->aalg->alg_name,
340 crypto_tfm_alg_digestsize(esp->auth.tfm), 357 crypto_hash_digestsize(hash),
341 aalg_desc->uinfo.auth.icv_fullbits/8); 358 aalg_desc->uinfo.auth.icv_fullbits/8);
342 goto error; 359 goto error;
343 } 360 }
@@ -351,13 +368,11 @@ static int esp_init_state(struct xfrm_state *x)
351 } 368 }
352 esp->conf.key = x->ealg->alg_key; 369 esp->conf.key = x->ealg->alg_key;
353 esp->conf.key_len = (x->ealg->alg_key_len+7)/8; 370 esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
354 if (x->props.ealgo == SADB_EALG_NULL) 371 tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
355 esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); 372 if (IS_ERR(tfm))
356 else
357 esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
358 if (esp->conf.tfm == NULL)
359 goto error; 373 goto error;
360 esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); 374 esp->conf.tfm = tfm;
375 esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
361 esp->conf.padlen = 0; 376 esp->conf.padlen = 0;
362 if (esp->conf.ivlen) { 377 if (esp->conf.ivlen) {
363 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); 378 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@@ -365,7 +380,7 @@ static int esp_init_state(struct xfrm_state *x)
365 goto error; 380 goto error;
366 get_random_bytes(esp->conf.ivec, esp->conf.ivlen); 381 get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
367 } 382 }
368 if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) 383 if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
369 goto error; 384 goto error;
370 x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; 385 x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
371 if (x->props.mode) 386 if (x->props.mode)
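Two structural points in the ESP conversion: the SADB_EALG_NULL special case disappears because the mode is no longer a tfm flag (whichever ecb/cbc wrapper applies is encoded in x->ealg->alg_name), and IV handling moves from crypto_cipher_set_iv()/get_iv() sized by crypto_tfm_alg_ivsize() to the blkcipher equivalents sized by the cached esp->conf.ivlen. Per-call state again lives in the desc; a CBC-style sketch under those assumptions:

    struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
    int err;

    crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
    err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
    if (!err)
            /* carry the updated IV forward for the next packet */
            crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);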
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index a0c28b2b756e..5bb9c9f03fb6 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -32,7 +32,7 @@
32 32
33struct ipcomp_tfms { 33struct ipcomp_tfms {
34 struct list_head list; 34 struct list_head list;
35 struct crypto_tfm **tfms; 35 struct crypto_comp **tfms;
36 int users; 36 int users;
37}; 37};
38 38
@@ -46,7 +46,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
46 int err, plen, dlen; 46 int err, plen, dlen;
47 struct ipcomp_data *ipcd = x->data; 47 struct ipcomp_data *ipcd = x->data;
48 u8 *start, *scratch; 48 u8 *start, *scratch;
49 struct crypto_tfm *tfm; 49 struct crypto_comp *tfm;
50 int cpu; 50 int cpu;
51 51
52 plen = skb->len; 52 plen = skb->len;
@@ -107,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
107 struct iphdr *iph = skb->nh.iph; 107 struct iphdr *iph = skb->nh.iph;
108 struct ipcomp_data *ipcd = x->data; 108 struct ipcomp_data *ipcd = x->data;
109 u8 *start, *scratch; 109 u8 *start, *scratch;
110 struct crypto_tfm *tfm; 110 struct crypto_comp *tfm;
111 int cpu; 111 int cpu;
112 112
113 ihlen = iph->ihl * 4; 113 ihlen = iph->ihl * 4;
@@ -302,7 +302,7 @@ static void **ipcomp_alloc_scratches(void)
302 return scratches; 302 return scratches;
303} 303}
304 304
305static void ipcomp_free_tfms(struct crypto_tfm **tfms) 305static void ipcomp_free_tfms(struct crypto_comp **tfms)
306{ 306{
307 struct ipcomp_tfms *pos; 307 struct ipcomp_tfms *pos;
308 int cpu; 308 int cpu;
@@ -324,28 +324,28 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
324 return; 324 return;
325 325
326 for_each_possible_cpu(cpu) { 326 for_each_possible_cpu(cpu) {
327 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 327 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
328 crypto_free_tfm(tfm); 328 crypto_free_comp(tfm);
329 } 329 }
330 free_percpu(tfms); 330 free_percpu(tfms);
331} 331}
332 332
333static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name) 333static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
334{ 334{
335 struct ipcomp_tfms *pos; 335 struct ipcomp_tfms *pos;
336 struct crypto_tfm **tfms; 336 struct crypto_comp **tfms;
337 int cpu; 337 int cpu;
338 338
339 /* This can be any valid CPU ID so we don't need locking. */ 339 /* This can be any valid CPU ID so we don't need locking. */
340 cpu = raw_smp_processor_id(); 340 cpu = raw_smp_processor_id();
341 341
342 list_for_each_entry(pos, &ipcomp_tfms_list, list) { 342 list_for_each_entry(pos, &ipcomp_tfms_list, list) {
343 struct crypto_tfm *tfm; 343 struct crypto_comp *tfm;
344 344
345 tfms = pos->tfms; 345 tfms = pos->tfms;
346 tfm = *per_cpu_ptr(tfms, cpu); 346 tfm = *per_cpu_ptr(tfms, cpu);
347 347
348 if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 348 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
349 pos->users++; 349 pos->users++;
350 return tfms; 350 return tfms;
351 } 351 }
@@ -359,12 +359,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
359 INIT_LIST_HEAD(&pos->list); 359 INIT_LIST_HEAD(&pos->list);
360 list_add(&pos->list, &ipcomp_tfms_list); 360 list_add(&pos->list, &ipcomp_tfms_list);
361 361
362 pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 362 pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
363 if (!tfms) 363 if (!tfms)
364 goto error; 364 goto error;
365 365
366 for_each_possible_cpu(cpu) { 366 for_each_possible_cpu(cpu) {
367 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 367 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
368 CRYPTO_ALG_ASYNC);
368 if (!tfm) 369 if (!tfm)
369 goto error; 370 goto error;
370 *per_cpu_ptr(tfms, cpu) = tfm; 371 *per_cpu_ptr(tfms, cpu) = tfm;
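IPComp needs only the compression half of the old crypto_tfm union, so the per-CPU arrays move to the dedicated struct crypto_comp type while the compress/decompress entry points keep their shape. A self-contained round-trip sketch (helper name, algorithm choice, and buffer sizes are illustrative):

    #include <linux/crypto.h>
    #include <linux/err.h>

    /* Hypothetical helper: compress then decompress a buffer. */
    static int comp_roundtrip(const u8 *src, unsigned int slen,
                              u8 *tmp, u8 *out, unsigned int buflen)
    {
            struct crypto_comp *tfm;
            unsigned int clen = buflen, dlen = buflen;
            int err;

            tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_comp_compress(tfm, src, slen, tmp, &clen);
            if (!err)
                    err = crypto_comp_decompress(tfm, tmp, clen, out, &dlen);

            crypto_free_comp(tfm);
            return err;
    }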
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index e923d4dea418..0ba06c0c5d39 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -77,6 +77,7 @@ config INET6_ESP
77 select CRYPTO 77 select CRYPTO
78 select CRYPTO_HMAC 78 select CRYPTO_HMAC
79 select CRYPTO_MD5 79 select CRYPTO_MD5
80 select CRYPTO_CBC
80 select CRYPTO_SHA1 81 select CRYPTO_SHA1
81 select CRYPTO_DES 82 select CRYPTO_DES
82 ---help--- 83 ---help---
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 9d4831bd4335..00ffa7bc6c9f 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -213,7 +213,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
213 ah->spi = x->id.spi; 213 ah->spi = x->id.spi;
214 ah->seq_no = htonl(++x->replay.oseq); 214 ah->seq_no = htonl(++x->replay.oseq);
215 xfrm_aevent_doreplay(x); 215 xfrm_aevent_doreplay(x);
216 ahp->icv(ahp, skb, ah->auth_data); 216 err = ah_mac_digest(ahp, skb, ah->auth_data);
217 if (err)
218 goto error_free_iph;
219 memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
217 220
218 err = 0; 221 err = 0;
219 222
@@ -251,6 +254,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
251 u16 hdr_len; 254 u16 hdr_len;
252 u16 ah_hlen; 255 u16 ah_hlen;
253 int nexthdr; 256 int nexthdr;
257 int err = -EINVAL;
254 258
255 if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) 259 if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
256 goto out; 260 goto out;
@@ -292,8 +296,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
292 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 296 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
293 memset(ah->auth_data, 0, ahp->icv_trunc_len); 297 memset(ah->auth_data, 0, ahp->icv_trunc_len);
294 skb_push(skb, hdr_len); 298 skb_push(skb, hdr_len);
295 ahp->icv(ahp, skb, ah->auth_data); 299 err = ah_mac_digest(ahp, skb, ah->auth_data);
296 if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 300 if (err)
301 goto free_out;
302 err = -EINVAL;
303 if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
297 LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n"); 304 LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
298 x->stats.integrity_failed++; 305 x->stats.integrity_failed++;
299 goto free_out; 306 goto free_out;
@@ -310,7 +317,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
310free_out: 317free_out:
311 kfree(tmp_hdr); 318 kfree(tmp_hdr);
312out: 319out:
313 return -EINVAL; 320 return err;
314} 321}
315 322
316static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 323static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -338,6 +345,7 @@ static int ah6_init_state(struct xfrm_state *x)
338{ 345{
339 struct ah_data *ahp = NULL; 346 struct ah_data *ahp = NULL;
340 struct xfrm_algo_desc *aalg_desc; 347 struct xfrm_algo_desc *aalg_desc;
348 struct crypto_hash *tfm;
341 349
342 if (!x->aalg) 350 if (!x->aalg)
343 goto error; 351 goto error;
@@ -355,24 +363,27 @@ static int ah6_init_state(struct xfrm_state *x)
355 363
356 ahp->key = x->aalg->alg_key; 364 ahp->key = x->aalg->alg_key;
357 ahp->key_len = (x->aalg->alg_key_len+7)/8; 365 ahp->key_len = (x->aalg->alg_key_len+7)/8;
358 ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 366 tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
359 if (!ahp->tfm) 367 if (IS_ERR(tfm))
368 goto error;
369
370 ahp->tfm = tfm;
371 if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
360 goto error; 372 goto error;
361 ahp->icv = ah_hmac_digest;
362 373
363 /* 374 /*
364 * Lookup the algorithm description maintained by xfrm_algo, 375 * Lookup the algorithm description maintained by xfrm_algo,
365 * verify crypto transform properties, and store information 376 * verify crypto transform properties, and store information
366 * we need for AH processing. This lookup cannot fail here 377 * we need for AH processing. This lookup cannot fail here
367 * after a successful crypto_alloc_tfm(). 378 * after a successful crypto_alloc_hash().
368 */ 379 */
369 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 380 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
370 BUG_ON(!aalg_desc); 381 BUG_ON(!aalg_desc);
371 382
372 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 383 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
373 crypto_tfm_alg_digestsize(ahp->tfm)) { 384 crypto_hash_digestsize(tfm)) {
374 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 385 printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
375 x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), 386 x->aalg->alg_name, crypto_hash_digestsize(tfm),
376 aalg_desc->uinfo.auth.icv_fullbits/8); 387 aalg_desc->uinfo.auth.icv_fullbits/8);
377 goto error; 388 goto error;
378 } 389 }
@@ -396,7 +407,7 @@ static int ah6_init_state(struct xfrm_state *x)
396error: 407error:
397 if (ahp) { 408 if (ahp) {
398 kfree(ahp->work_icv); 409 kfree(ahp->work_icv);
399 crypto_free_tfm(ahp->tfm); 410 crypto_free_hash(ahp->tfm);
400 kfree(ahp); 411 kfree(ahp);
401 } 412 }
402 return -EINVAL; 413 return -EINVAL;
@@ -411,7 +422,7 @@ static void ah6_destroy(struct xfrm_state *x)
411 422
412 kfree(ahp->work_icv); 423 kfree(ahp->work_icv);
413 ahp->work_icv = NULL; 424 ahp->work_icv = NULL;
414 crypto_free_tfm(ahp->tfm); 425 crypto_free_hash(ahp->tfm);
415 ahp->tfm = NULL; 426 ahp->tfm = NULL;
416 kfree(ahp); 427 kfree(ahp);
417} 428}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a278d5e862fe..2ebfd281e721 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,6 +24,7 @@
24 * This file is derived from net/ipv4/esp.c 24 * This file is derived from net/ipv4/esp.c
25 */ 25 */
26 26
27#include <linux/err.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <net/ip.h> 29#include <net/ip.h>
29#include <net/xfrm.h> 30#include <net/xfrm.h>
@@ -44,7 +45,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
44 int hdr_len; 45 int hdr_len;
45 struct ipv6hdr *top_iph; 46 struct ipv6hdr *top_iph;
46 struct ipv6_esp_hdr *esph; 47 struct ipv6_esp_hdr *esph;
47 struct crypto_tfm *tfm; 48 struct crypto_blkcipher *tfm;
49 struct blkcipher_desc desc;
48 struct esp_data *esp; 50 struct esp_data *esp;
49 struct sk_buff *trailer; 51 struct sk_buff *trailer;
50 int blksize; 52 int blksize;
@@ -67,7 +69,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
67 69
68 alen = esp->auth.icv_trunc_len; 70 alen = esp->auth.icv_trunc_len;
69 tfm = esp->conf.tfm; 71 tfm = esp->conf.tfm;
70 blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); 72 desc.tfm = tfm;
73 desc.flags = 0;
74 blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
71 clen = ALIGN(clen + 2, blksize); 75 clen = ALIGN(clen + 2, blksize);
72 if (esp->conf.padlen) 76 if (esp->conf.padlen)
73 clen = ALIGN(clen, esp->conf.padlen); 77 clen = ALIGN(clen, esp->conf.padlen);
@@ -96,7 +100,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
96 xfrm_aevent_doreplay(x); 100 xfrm_aevent_doreplay(x);
97 101
98 if (esp->conf.ivlen) 102 if (esp->conf.ivlen)
99 crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 103 crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
100 104
101 do { 105 do {
102 struct scatterlist *sg = &esp->sgbuf[0]; 106 struct scatterlist *sg = &esp->sgbuf[0];
@@ -107,24 +111,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
107 goto error; 111 goto error;
108 } 112 }
109 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); 113 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
110 crypto_cipher_encrypt(tfm, sg, sg, clen); 114 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
111 if (unlikely(sg != &esp->sgbuf[0])) 115 if (unlikely(sg != &esp->sgbuf[0]))
112 kfree(sg); 116 kfree(sg);
113 } while (0); 117 } while (0);
114 118
119 if (unlikely(err))
120 goto error;
121
115 if (esp->conf.ivlen) { 122 if (esp->conf.ivlen) {
116 memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 123 memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
117 crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 124 crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
118 } 125 }
119 126
120 if (esp->auth.icv_full_len) { 127 if (esp->auth.icv_full_len) {
121 esp->auth.icv(esp, skb, (u8*)esph-skb->data, 128 err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
122 sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); 129 sizeof(*esph) + esp->conf.ivlen + clen);
123 pskb_put(skb, trailer, alen); 130 memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
124 } 131 }
125 132
126 err = 0;
127
128error: 133error:
129 return err; 134 return err;
130} 135}
@@ -134,8 +139,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
134 struct ipv6hdr *iph; 139 struct ipv6hdr *iph;
135 struct ipv6_esp_hdr *esph; 140 struct ipv6_esp_hdr *esph;
136 struct esp_data *esp = x->data; 141 struct esp_data *esp = x->data;
142 struct crypto_blkcipher *tfm = esp->conf.tfm;
143 struct blkcipher_desc desc = { .tfm = tfm };
137 struct sk_buff *trailer; 144 struct sk_buff *trailer;
138 int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 145 int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
139 int alen = esp->auth.icv_trunc_len; 146 int alen = esp->auth.icv_trunc_len;
140 int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; 147 int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
141 148
@@ -155,15 +162,16 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
155 162
156 /* If integrity check is required, do this. */ 163 /* If integrity check is required, do this. */
157 if (esp->auth.icv_full_len) { 164 if (esp->auth.icv_full_len) {
158 u8 sum[esp->auth.icv_full_len]; 165 u8 sum[alen];
159 u8 sum1[alen];
160 166
161 esp->auth.icv(esp, skb, 0, skb->len-alen, sum); 167 ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
168 if (ret)
169 goto out;
162 170
163 if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) 171 if (skb_copy_bits(skb, skb->len - alen, sum, alen))
164 BUG(); 172 BUG();
165 173
166 if (unlikely(memcmp(sum, sum1, alen))) { 174 if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
167 x->stats.integrity_failed++; 175 x->stats.integrity_failed++;
168 ret = -EINVAL; 176 ret = -EINVAL;
169 goto out; 177 goto out;
@@ -182,7 +190,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
182 190
183 /* Get ivec. This can be wrong, check against another impls. */ 191 /* Get ivec. This can be wrong, check against another impls. */
184 if (esp->conf.ivlen) 192 if (esp->conf.ivlen)
185 crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); 193 crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
186 194
187 { 195 {
188 u8 nexthdr[2]; 196 u8 nexthdr[2];
@@ -197,9 +205,11 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
197 } 205 }
198 } 206 }
199 skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen); 207 skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
200 crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); 208 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
201 if (unlikely(sg != &esp->sgbuf[0])) 209 if (unlikely(sg != &esp->sgbuf[0]))
202 kfree(sg); 210 kfree(sg);
211 if (unlikely(ret))
212 goto out;
203 213
204 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) 214 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
205 BUG(); 215 BUG();
@@ -225,7 +235,7 @@ out:
225static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) 235static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
226{ 236{
227 struct esp_data *esp = x->data; 237 struct esp_data *esp = x->data;
228 u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 238 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
229 239
230 if (x->props.mode) { 240 if (x->props.mode) {
231 mtu = ALIGN(mtu + 2, blksize); 241 mtu = ALIGN(mtu + 2, blksize);
@@ -266,11 +276,11 @@ static void esp6_destroy(struct xfrm_state *x)
266 if (!esp) 276 if (!esp)
267 return; 277 return;
268 278
269 crypto_free_tfm(esp->conf.tfm); 279 crypto_free_blkcipher(esp->conf.tfm);
270 esp->conf.tfm = NULL; 280 esp->conf.tfm = NULL;
271 kfree(esp->conf.ivec); 281 kfree(esp->conf.ivec);
272 esp->conf.ivec = NULL; 282 esp->conf.ivec = NULL;
273 crypto_free_tfm(esp->auth.tfm); 283 crypto_free_hash(esp->auth.tfm);
274 esp->auth.tfm = NULL; 284 esp->auth.tfm = NULL;
275 kfree(esp->auth.work_icv); 285 kfree(esp->auth.work_icv);
276 esp->auth.work_icv = NULL; 286 esp->auth.work_icv = NULL;
@@ -280,6 +290,7 @@ static void esp6_destroy(struct xfrm_state *x)
280static int esp6_init_state(struct xfrm_state *x) 290static int esp6_init_state(struct xfrm_state *x)
281{ 291{
282 struct esp_data *esp = NULL; 292 struct esp_data *esp = NULL;
293 struct crypto_blkcipher *tfm;
283 294
284 /* null auth and encryption can have zero length keys */ 295 /* null auth and encryption can have zero length keys */
285 if (x->aalg) { 296 if (x->aalg) {
@@ -298,24 +309,29 @@ static int esp6_init_state(struct xfrm_state *x)
298 309
299 if (x->aalg) { 310 if (x->aalg) {
300 struct xfrm_algo_desc *aalg_desc; 311 struct xfrm_algo_desc *aalg_desc;
312 struct crypto_hash *hash;
301 313
302 esp->auth.key = x->aalg->alg_key; 314 esp->auth.key = x->aalg->alg_key;
303 esp->auth.key_len = (x->aalg->alg_key_len+7)/8; 315 esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
304 esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 316 hash = crypto_alloc_hash(x->aalg->alg_name, 0,
305 if (esp->auth.tfm == NULL) 317 CRYPTO_ALG_ASYNC);
318 if (IS_ERR(hash))
319 goto error;
320
321 esp->auth.tfm = hash;
322 if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
306 goto error; 323 goto error;
307 esp->auth.icv = esp_hmac_digest;
308 324
309 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 325 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
310 BUG_ON(!aalg_desc); 326 BUG_ON(!aalg_desc);
311 327
312 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 328 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
313 crypto_tfm_alg_digestsize(esp->auth.tfm)) { 329 crypto_hash_digestsize(hash)) {
314 printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", 330 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
315 x->aalg->alg_name, 331 x->aalg->alg_name,
316 crypto_tfm_alg_digestsize(esp->auth.tfm), 332 crypto_hash_digestsize(hash),
317 aalg_desc->uinfo.auth.icv_fullbits/8); 333 aalg_desc->uinfo.auth.icv_fullbits/8);
318 goto error; 334 goto error;
319 } 335 }
320 336
321 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 337 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
@@ -327,13 +343,11 @@ static int esp6_init_state(struct xfrm_state *x)
327 } 343 }
328 esp->conf.key = x->ealg->alg_key; 344 esp->conf.key = x->ealg->alg_key;
329 esp->conf.key_len = (x->ealg->alg_key_len+7)/8; 345 esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
330 if (x->props.ealgo == SADB_EALG_NULL) 346 tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
331 esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); 347 if (IS_ERR(tfm))
332 else
333 esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
334 if (esp->conf.tfm == NULL)
335 goto error; 348 goto error;
336 esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); 349 esp->conf.tfm = tfm;
350 esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
337 esp->conf.padlen = 0; 351 esp->conf.padlen = 0;
338 if (esp->conf.ivlen) { 352 if (esp->conf.ivlen) {
339 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); 353 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@@ -341,7 +355,7 @@ static int esp6_init_state(struct xfrm_state *x)
341 goto error; 355 goto error;
342 get_random_bytes(esp->conf.ivec, esp->conf.ivlen); 356 get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
343 } 357 }
344 if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) 358 if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
345 goto error; 359 goto error;
346 x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen; 360 x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
347 if (x->props.mode) 361 if (x->props.mode)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 7e4d1c17bfbc..a81e9e9d93bd 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,7 +53,7 @@
53 53
54struct ipcomp6_tfms { 54struct ipcomp6_tfms {
55 struct list_head list; 55 struct list_head list;
56 struct crypto_tfm **tfms; 56 struct crypto_comp **tfms;
57 int users; 57 int users;
58}; 58};
59 59
@@ -70,7 +70,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
70 int plen, dlen; 70 int plen, dlen;
71 struct ipcomp_data *ipcd = x->data; 71 struct ipcomp_data *ipcd = x->data;
72 u8 *start, *scratch; 72 u8 *start, *scratch;
73 struct crypto_tfm *tfm; 73 struct crypto_comp *tfm;
74 int cpu; 74 int cpu;
75 75
76 if (skb_linearize_cow(skb)) 76 if (skb_linearize_cow(skb))
@@ -129,7 +129,7 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
129 struct ipcomp_data *ipcd = x->data; 129 struct ipcomp_data *ipcd = x->data;
130 int plen, dlen; 130 int plen, dlen;
131 u8 *start, *scratch; 131 u8 *start, *scratch;
132 struct crypto_tfm *tfm; 132 struct crypto_comp *tfm;
133 int cpu; 133 int cpu;
134 134
135 hdr_len = skb->h.raw - skb->data; 135 hdr_len = skb->h.raw - skb->data;
@@ -301,7 +301,7 @@ static void **ipcomp6_alloc_scratches(void)
301 return scratches; 301 return scratches;
302} 302}
303 303
304static void ipcomp6_free_tfms(struct crypto_tfm **tfms) 304static void ipcomp6_free_tfms(struct crypto_comp **tfms)
305{ 305{
306 struct ipcomp6_tfms *pos; 306 struct ipcomp6_tfms *pos;
307 int cpu; 307 int cpu;
@@ -323,28 +323,28 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
323 return; 323 return;
324 324
325 for_each_possible_cpu(cpu) { 325 for_each_possible_cpu(cpu) {
326 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 326 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
327 crypto_free_tfm(tfm); 327 crypto_free_comp(tfm);
328 } 328 }
329 free_percpu(tfms); 329 free_percpu(tfms);
330} 330}
331 331
332static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) 332static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
333{ 333{
334 struct ipcomp6_tfms *pos; 334 struct ipcomp6_tfms *pos;
335 struct crypto_tfm **tfms; 335 struct crypto_comp **tfms;
336 int cpu; 336 int cpu;
337 337
338 /* This can be any valid CPU ID so we don't need locking. */ 338 /* This can be any valid CPU ID so we don't need locking. */
339 cpu = raw_smp_processor_id(); 339 cpu = raw_smp_processor_id();
340 340
341 list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 341 list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
342 struct crypto_tfm *tfm; 342 struct crypto_comp *tfm;
343 343
344 tfms = pos->tfms; 344 tfms = pos->tfms;
345 tfm = *per_cpu_ptr(tfms, cpu); 345 tfm = *per_cpu_ptr(tfms, cpu);
346 346
347 if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 347 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
348 pos->users++; 348 pos->users++;
349 return tfms; 349 return tfms;
350 } 350 }
@@ -358,12 +358,13 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
358 INIT_LIST_HEAD(&pos->list); 358 INIT_LIST_HEAD(&pos->list);
359 list_add(&pos->list, &ipcomp6_tfms_list); 359 list_add(&pos->list, &ipcomp6_tfms_list);
360 360
361 pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 361 pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
362 if (!tfms) 362 if (!tfms)
363 goto error; 363 goto error;
364 364
365 for_each_possible_cpu(cpu) { 365 for_each_possible_cpu(cpu) {
366 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 366 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
367 CRYPTO_ALG_ASYNC);
367 if (!tfm) 368 if (!tfm)
368 goto error; 369 goto error;
369 *per_cpu_ptr(tfms, cpu) = tfm; 370 *per_cpu_ptr(tfms, cpu) = tfm;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ffda1d680529..35c49ff2d062 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -173,7 +173,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
174 174
175 /* Free up the HMAC transform. */ 175 /* Free up the HMAC transform. */
176 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); 176 crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
177 177
178 /* Cleanup. */ 178 /* Cleanup. */
179 sctp_inq_free(&ep->base.inqueue); 179 sctp_inq_free(&ep->base.inqueue);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 17b509282cf2..7745bdea7817 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1282,10 +1282,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1282 1282
1283 retval = kmalloc(*cookie_len, GFP_ATOMIC); 1283 retval = kmalloc(*cookie_len, GFP_ATOMIC);
1284 1284
1285 if (!retval) { 1285 if (!retval)
1286 *cookie_len = 0;
1287 goto nodata; 1286 goto nodata;
1288 }
1289 1287
1290 /* Clear this memory since we are sending this data structure 1288 /* Clear this memory since we are sending this data structure
1291 * out on the network. 1289 * out on the network.
@@ -1321,19 +1319,29 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1321 ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); 1319 ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
1322 1320
1323 if (sctp_sk(ep->base.sk)->hmac) { 1321 if (sctp_sk(ep->base.sk)->hmac) {
1322 struct hash_desc desc;
1323
1324 /* Sign the message. */ 1324 /* Sign the message. */
1325 sg.page = virt_to_page(&cookie->c); 1325 sg.page = virt_to_page(&cookie->c);
1326 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; 1326 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
1327 sg.length = bodysize; 1327 sg.length = bodysize;
1328 keylen = SCTP_SECRET_SIZE; 1328 keylen = SCTP_SECRET_SIZE;
1329 key = (char *)ep->secret_key[ep->current_key]; 1329 key = (char *)ep->secret_key[ep->current_key];
1330 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1331 desc.flags = 0;
1330 1332
1331 sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, 1333 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1332 &sg, 1, cookie->signature); 1334 crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
1335 goto free_cookie;
1333 } 1336 }
1334 1337
1335nodata:
1336 return retval; 1338 return retval;
1339
1340free_cookie:
1341 kfree(retval);
1342nodata:
1343 *cookie_len = 0;
1344 return NULL;
1337} 1345}
1338 1346
1339/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ 1347/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
@@ -1354,6 +1362,7 @@ struct sctp_association *sctp_unpack_cookie(
1354 sctp_scope_t scope; 1362 sctp_scope_t scope;
1355 struct sk_buff *skb = chunk->skb; 1363 struct sk_buff *skb = chunk->skb;
1356 struct timeval tv; 1364 struct timeval tv;
1365 struct hash_desc desc;
1357 1366
1358 /* Header size is static data prior to the actual cookie, including 1367 /* Header size is static data prior to the actual cookie, including
1359 * any padding. 1368 * any padding.
@@ -1389,17 +1398,25 @@ struct sctp_association *sctp_unpack_cookie(
1389 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; 1398 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
1390 sg.length = bodysize; 1399 sg.length = bodysize;
1391 key = (char *)ep->secret_key[ep->current_key]; 1400 key = (char *)ep->secret_key[ep->current_key];
1401 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1402 desc.flags = 0;
1392 1403
1393 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1404 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1394 sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg, 1405 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1395 1, digest); 1406 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1407 *error = -SCTP_IERROR_NOMEM;
1408 goto fail;
1409 }
1396 1410
1397 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1411 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1398 /* Try the previous key. */ 1412 /* Try the previous key. */
1399 key = (char *)ep->secret_key[ep->last_key]; 1413 key = (char *)ep->secret_key[ep->last_key];
1400 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1414 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1401 sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, 1415 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1402 &sg, 1, digest); 1416 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1417 *error = -SCTP_IERROR_NOMEM;
1418 goto fail;
1419 }
1403 1420
1404 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1421 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1405 /* Yikes! Still bad signature! */ 1422 /* Yikes! Still bad signature! */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dab15949958e..85caf7963886 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4898,7 +4898,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
4898int sctp_inet_listen(struct socket *sock, int backlog) 4898int sctp_inet_listen(struct socket *sock, int backlog)
4899{ 4899{
4900 struct sock *sk = sock->sk; 4900 struct sock *sk = sock->sk;
4901 struct crypto_tfm *tfm=NULL; 4901 struct crypto_hash *tfm = NULL;
4902 int err = -EINVAL; 4902 int err = -EINVAL;
4903 4903
4904 if (unlikely(backlog < 0)) 4904 if (unlikely(backlog < 0))
@@ -4911,7 +4911,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
4911 4911
4912 /* Allocate HMAC for generating cookie. */ 4912 /* Allocate HMAC for generating cookie. */
4913 if (sctp_hmac_alg) { 4913 if (sctp_hmac_alg) {
4914 tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0); 4914 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
4915 if (!tfm) { 4915 if (!tfm) {
4916 err = -ENOSYS; 4916 err = -ENOSYS;
4917 goto out; 4917 goto out;
@@ -4937,7 +4937,7 @@ out:
4937 sctp_release_sock(sk); 4937 sctp_release_sock(sk);
4938 return err; 4938 return err;
4939cleanup: 4939cleanup:
4940 sctp_crypto_free_tfm(tfm); 4940 crypto_free_hash(tfm);
4941 goto out; 4941 goto out;
4942} 4942}
4943 4943
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 76b969e6904f..e11a40b25cce 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <linux/err.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -49,7 +50,7 @@
 
 u32
 krb5_encrypt(
-        struct crypto_tfm *tfm,
+        struct crypto_blkcipher *tfm,
         void * iv,
         void * in,
         void * out,
@@ -58,26 +59,27 @@ krb5_encrypt(
         u32 ret = -EINVAL;
         struct scatterlist sg[1];
         u8 local_iv[16] = {0};
+        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
         dprintk("RPC: krb5_encrypt: input data:\n");
         print_hexl((u32 *)in, length, 0);
 
-        if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                 goto out;
 
-        if (crypto_tfm_alg_ivsize(tfm) > 16) {
+        if (crypto_blkcipher_ivsize(tfm) > 16) {
                 dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
-                        crypto_tfm_alg_ivsize(tfm));
+                        crypto_blkcipher_ivsize(tfm));
                 goto out;
         }
 
         if (iv)
-                memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
         memcpy(out, in, length);
         sg_set_buf(sg, out, length);
 
-        ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
+        ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 
         dprintk("RPC: krb5_encrypt: output data:\n");
         print_hexl((u32 *)out, length, 0);
@@ -90,7 +92,7 @@ EXPORT_SYMBOL(krb5_encrypt);
 
 u32
 krb5_decrypt(
-        struct crypto_tfm *tfm,
+        struct crypto_blkcipher *tfm,
         void * iv,
         void * in,
         void * out,
@@ -99,25 +101,26 @@ krb5_decrypt(
         u32 ret = -EINVAL;
         struct scatterlist sg[1];
         u8 local_iv[16] = {0};
+        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
         dprintk("RPC: krb5_decrypt: input data:\n");
         print_hexl((u32 *)in, length, 0);
 
-        if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                 goto out;
 
-        if (crypto_tfm_alg_ivsize(tfm) > 16) {
+        if (crypto_blkcipher_ivsize(tfm) > 16) {
                 dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
-                        crypto_tfm_alg_ivsize(tfm));
+                        crypto_blkcipher_ivsize(tfm));
                 goto out;
         }
         if (iv)
-                memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm));
+                memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
 
         memcpy(out, in, length);
         sg_set_buf(sg, out, length);
 
-        ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv);
+        ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 
         dprintk("RPC: krb5_decrypt: output_data:\n");
         print_hexl((u32 *)out, length, 0);
@@ -197,11 +200,9 @@ out:
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
-        struct crypto_tfm *tfm = (struct crypto_tfm *)data;
+        struct hash_desc *desc = data;
 
-        crypto_digest_update(tfm, sg, 1);
-
-        return 0;
+        return crypto_hash_update(desc, sg, sg->length);
 }
 
 /* checksum the plaintext data and hdrlen bytes of the token header */
@@ -210,8 +211,9 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
                 int body_offset, struct xdr_netobj *cksum)
 {
         char *cksumname;
-        struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */
+        struct hash_desc desc; /* XXX add to ctx? */
         struct scatterlist sg[1];
+        int err;
 
         switch (cksumtype) {
         case CKSUMTYPE_RSA_MD5:
@@ -222,25 +224,35 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
                         " unsupported checksum %d", cksumtype);
                 return GSS_S_FAILURE;
         }
-        if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
+        desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(desc.tfm))
                 return GSS_S_FAILURE;
-        cksum->len = crypto_tfm_alg_digestsize(tfm);
+        cksum->len = crypto_hash_digestsize(desc.tfm);
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-        crypto_digest_init(tfm);
+        err = crypto_hash_init(&desc);
+        if (err)
+                goto out;
         sg_set_buf(sg, header, hdrlen);
-        crypto_digest_update(tfm, sg, 1);
-        process_xdr_buf(body, body_offset, body->len - body_offset,
-                        checksummer, tfm);
-        crypto_digest_final(tfm, cksum->data);
-        crypto_free_tfm(tfm);
-        return 0;
+        err = crypto_hash_update(&desc, sg, hdrlen);
+        if (err)
+                goto out;
+        err = process_xdr_buf(body, body_offset, body->len - body_offset,
+                        checksummer, &desc);
+        if (err)
+                goto out;
+        err = crypto_hash_final(&desc, cksum->data);
+
+out:
+        crypto_free_hash(desc.tfm);
+        return err ? GSS_S_FAILURE : 0;
 }
 
 EXPORT_SYMBOL(make_checksum);
 
 struct encryptor_desc {
         u8 iv[8]; /* XXX hard-coded blocksize */
-        struct crypto_tfm *tfm;
+        struct blkcipher_desc desc;
         int pos;
         struct xdr_buf *outbuf;
         struct page **pages;
@@ -285,8 +297,8 @@ encryptor(struct scatterlist *sg, void *data)
         if (thislen == 0)
                 return 0;
 
-        ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
-                        thislen, desc->iv);
+        ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
+                                          desc->infrags, thislen);
         if (ret)
                 return ret;
         if (fraglen) {
@@ -305,16 +317,18 @@ encryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
-                struct page **pages)
+gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+                    int offset, struct page **pages)
 {
         int ret;
         struct encryptor_desc desc;
 
-        BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+        BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
 
         memset(desc.iv, 0, sizeof(desc.iv));
-        desc.tfm = tfm;
+        desc.desc.tfm = tfm;
+        desc.desc.info = desc.iv;
+        desc.desc.flags = 0;
         desc.pos = offset;
         desc.outbuf = buf;
         desc.pages = pages;
@@ -329,7 +343,7 @@ EXPORT_SYMBOL(gss_encrypt_xdr_buf);
 
 struct decryptor_desc {
         u8 iv[8]; /* XXX hard-coded blocksize */
-        struct crypto_tfm *tfm;
+        struct blkcipher_desc desc;
         struct scatterlist frags[4];
         int fragno;
         int fraglen;
@@ -355,8 +369,8 @@ decryptor(struct scatterlist *sg, void *data)
         if (thislen == 0)
                 return 0;
 
-        ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
-                        thislen, desc->iv);
+        ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
+                                          desc->frags, thislen);
         if (ret)
                 return ret;
         if (fraglen) {
@@ -373,15 +387,18 @@ decryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
+gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+                    int offset)
 {
         struct decryptor_desc desc;
 
         /* XXXJBF: */
-        BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+        BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
 
         memset(desc.iv, 0, sizeof(desc.iv));
-        desc.tfm = tfm;
+        desc.desc.tfm = tfm;
+        desc.desc.info = desc.iv;
+        desc.desc.flags = 0;
         desc.fragno = 0;
         desc.fraglen = 0;
         return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
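Note: the make_checksum() hunk above is the shape of every crypto_digest_* conversion in this series: the fire-and-forget digest calls become a struct hash_desc plus crypto_hash_init/update/final steps, each of which can fail and must be checked. Below is a condensed sketch of that pattern, assuming the post-patch 2.6-era crypto_hash API; the function name and buffers are illustrative, not taken from the patch.

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Digest one flat buffer through the incremental hash interface. */
    static int example_digest(const char *alg, u8 *buf, unsigned int len,
                              u8 *out)
    {
            struct hash_desc desc;
            struct scatterlist sg[1];
            int err;

            /* Allocation now reports failure via ERR_PTR(), not NULL. */
            desc.tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(desc.tfm))
                    return PTR_ERR(desc.tfm);
            desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

            sg_set_buf(sg, buf, len);
            err = crypto_hash_init(&desc);
            if (!err)
                    err = crypto_hash_update(&desc, sg, len);
            if (!err)
                    err = crypto_hash_final(&desc, out);

            crypto_free_hash(desc.tfm);
            return err;
    }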
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 70e1e53a632b..325e72e4fd31 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -34,6 +34,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -78,10 +79,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 }
 
 static inline const void *
-get_key(const void *p, const void *end, struct crypto_tfm **res)
+get_key(const void *p, const void *end, struct crypto_blkcipher **res)
 {
         struct xdr_netobj key;
-        int alg, alg_mode;
+        int alg;
         char *alg_name;
 
         p = simple_get_bytes(p, end, &alg, sizeof(alg));
@@ -93,18 +94,19 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
 
         switch (alg) {
         case ENCTYPE_DES_CBC_RAW:
-                alg_name = "des";
-                alg_mode = CRYPTO_TFM_MODE_CBC;
+                alg_name = "cbc(des)";
                 break;
         default:
                 printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
                 goto out_err_free_key;
         }
-        if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
+        *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(*res)) {
                 printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+                *res = NULL;
                 goto out_err_free_key;
         }
-        if (crypto_cipher_setkey(*res, key.data, key.len)) {
+        if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
                 printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
                 goto out_err_free_tfm;
         }
@@ -113,7 +115,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
         return p;
 
 out_err_free_tfm:
-        crypto_free_tfm(*res);
+        crypto_free_blkcipher(*res);
 out_err_free_key:
         kfree(key.data);
         p = ERR_PTR(-EINVAL);
@@ -172,9 +174,9 @@ gss_import_sec_context_kerberos(const void *p,
         return 0;
 
 out_err_free_key2:
-        crypto_free_tfm(ctx->seq);
+        crypto_free_blkcipher(ctx->seq);
 out_err_free_key1:
-        crypto_free_tfm(ctx->enc);
+        crypto_free_blkcipher(ctx->enc);
 out_err_free_mech:
         kfree(ctx->mech_used.data);
 out_err_free_ctx:
@@ -187,8 +189,8 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
         struct krb5_ctx *kctx = internal_ctx;
 
-        crypto_free_tfm(kctx->seq);
-        crypto_free_tfm(kctx->enc);
+        crypto_free_blkcipher(kctx->seq);
+        crypto_free_blkcipher(kctx->enc);
         kfree(kctx->mech_used.data);
         kfree(kctx);
 }
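Note: two contract changes dominate this get_key() conversion: the cipher mode moves into the algorithm name ("cbc(des)" rather than "des" plus CRYPTO_TFM_MODE_CBC), and crypto_alloc_blkcipher() returns ERR_PTR() on failure instead of NULL, which is why the error path now resets *res before jumping out. A minimal allocation-and-keying sketch under the same assumed API; the wrapper name is hypothetical.

    #include <linux/crypto.h>
    #include <linux/err.h>

    /* Allocate and key a CBC transform by its template name. */
    static struct crypto_blkcipher *example_cbc_des(const u8 *key,
                                                    unsigned int keylen)
    {
            struct crypto_blkcipher *tfm;

            tfm = crypto_alloc_blkcipher("cbc(des)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return NULL;    /* callers in this file test for NULL */

            if (crypto_blkcipher_setkey(tfm, key, keylen)) {
                    crypto_free_blkcipher(tfm);
                    return NULL;
            }
            return tfm;
    }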
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c53ead39118d..c604baf3a5f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -41,7 +41,7 @@
 #endif
 
 s32
-krb5_make_seq_num(struct crypto_tfm *key,
+krb5_make_seq_num(struct crypto_blkcipher *key,
                 int direction,
                 s32 seqnum,
                 unsigned char *cksum, unsigned char *buf)
@@ -62,7 +62,7 @@ krb5_make_seq_num(struct crypto_tfm *key,
 }
 
 s32
-krb5_get_seq_num(struct crypto_tfm *key,
+krb5_get_seq_num(struct crypto_blkcipher *key,
                unsigned char *cksum,
                unsigned char *buf,
                int *direction, s32 * seqnum)
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 89d1f3e14128..f179415d0c38 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
                 goto out_err;
         }
 
-        blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+        blocksize = crypto_blkcipher_blocksize(kctx->enc);
         gss_krb5_add_padding(buf, offset, blocksize);
         BUG_ON((buf->len - offset) % blocksize);
         plainlen = blocksize + buf->len - offset;
@@ -346,7 +346,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
         /* Copy the data back to the right position. XXX: Would probably be
          * better to copy and encrypt at the same time. */
 
-        blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+        blocksize = crypto_blkcipher_blocksize(kctx->enc);
         data_start = ptr + 22 + blocksize;
         orig_start = buf->head[0].iov_base + offset;
         data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
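Note: only the blocksize query changes in this file, but that value drives the padding arithmetic around it: gss_krb5_add_padding() must leave (buf->len - offset) an exact multiple of the cipher block size, which the BUG_ON then asserts. A sketch of the usual Kerberos pad-length computation, with a hypothetical helper name; this is inferred from the invariant above, not code from the patch.

    /* Kerberos-style padding: always 1..blocksize bytes, so that
     * len + pad is an exact multiple of blocksize. */
    static unsigned int example_pad_len(unsigned int len,
                                        unsigned int blocksize)
    {
            return blocksize - (len % blocksize);
    }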
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 88dcb52d171b..bdedf456bc17 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -34,6 +34,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -83,10 +84,11 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 }
 
 static inline const void *
-get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
+get_key(const void *p, const void *end, struct crypto_blkcipher **res,
+        int *resalg)
 {
         struct xdr_netobj key = { 0 };
-        int alg_mode,setkey = 0;
+        int setkey = 0;
         char *alg_name;
 
         p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
@@ -98,14 +100,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
 
         switch (*resalg) {
         case NID_des_cbc:
-                alg_name = "des";
-                alg_mode = CRYPTO_TFM_MODE_CBC;
+                alg_name = "cbc(des)";
                 setkey = 1;
                 break;
         case NID_cast5_cbc:
                 /* XXXX here in name only, not used */
-                alg_name = "cast5";
-                alg_mode = CRYPTO_TFM_MODE_CBC;
+                alg_name = "cbc(cast5)";
                 setkey = 0; /* XXX will need to set to 1 */
                 break;
         case NID_md5:
@@ -113,19 +113,20 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
                         dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
                 }
                 alg_name = "md5";
-                alg_mode = 0;
                 setkey = 0;
                 break;
         default:
                 dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
                 goto out_err_free_key;
         }
-        if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
+        *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(*res)) {
                 printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
+                *res = NULL;
                 goto out_err_free_key;
         }
         if (setkey) {
-                if (crypto_cipher_setkey(*res, key.data, key.len)) {
+                if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
                         printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
                         goto out_err_free_tfm;
                 }
@@ -136,7 +137,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
         return p;
 
 out_err_free_tfm:
-        crypto_free_tfm(*res);
+        crypto_free_blkcipher(*res);
 out_err_free_key:
         if(key.len > 0)
                 kfree(key.data);
@@ -204,9 +205,9 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
         return 0;
 
 out_err_free_key2:
-        crypto_free_tfm(ctx->derived_integ_key);
+        crypto_free_blkcipher(ctx->derived_integ_key);
 out_err_free_key1:
-        crypto_free_tfm(ctx->derived_conf_key);
+        crypto_free_blkcipher(ctx->derived_conf_key);
 out_err_free_s_key:
         kfree(ctx->share_key.data);
 out_err_free_mech:
@@ -223,8 +224,8 @@ static void
 gss_delete_sec_context_spkm3(void *internal_ctx) {
         struct spkm3_ctx *sctx = internal_ctx;
 
-        crypto_free_tfm(sctx->derived_integ_key);
-        crypto_free_tfm(sctx->derived_conf_key);
+        crypto_free_blkcipher(sctx->derived_integ_key);
+        crypto_free_blkcipher(sctx->derived_conf_key);
         kfree(sctx->share_key.data);
         kfree(sctx->mech_used.data);
         kfree(sctx);
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 04e1aea58bc9..5a0dbeb6bbe8 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -30,7 +30,8 @@
  */
 static struct xfrm_algo_desc aalg_list[] = {
 {
-        .name = "digest_null",
+        .name = "hmac(digest_null)",
+        .compat = "digest_null",
 
         .uinfo = {
                 .auth = {
@@ -47,7 +48,8 @@ static struct xfrm_algo_desc aalg_list[] = {
         }
 },
 {
-        .name = "md5",
+        .name = "hmac(md5)",
+        .compat = "md5",
 
         .uinfo = {
                 .auth = {
@@ -64,7 +66,8 @@ static struct xfrm_algo_desc aalg_list[] = {
         }
 },
 {
-        .name = "sha1",
+        .name = "hmac(sha1)",
+        .compat = "sha1",
 
         .uinfo = {
                 .auth = {
@@ -81,7 +84,8 @@ static struct xfrm_algo_desc aalg_list[] = {
         }
 },
 {
-        .name = "sha256",
+        .name = "hmac(sha256)",
+        .compat = "sha256",
 
         .uinfo = {
                 .auth = {
@@ -98,7 +102,8 @@ static struct xfrm_algo_desc aalg_list[] = {
         }
 },
 {
-        .name = "ripemd160",
+        .name = "hmac(ripemd160)",
+        .compat = "ripemd160",
 
         .uinfo = {
                 .auth = {
@@ -118,7 +123,8 @@ static struct xfrm_algo_desc aalg_list[] = {
 
 static struct xfrm_algo_desc ealg_list[] = {
 {
-        .name = "cipher_null",
+        .name = "ecb(cipher_null)",
+        .compat = "cipher_null",
 
         .uinfo = {
                 .encr = {
@@ -135,7 +141,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "des",
+        .name = "cbc(des)",
+        .compat = "des",
 
         .uinfo = {
                 .encr = {
@@ -152,7 +159,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "des3_ede",
+        .name = "cbc(des3_ede)",
+        .compat = "des3_ede",
 
         .uinfo = {
                 .encr = {
@@ -169,7 +177,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "cast128",
+        .name = "cbc(cast128)",
+        .compat = "cast128",
 
         .uinfo = {
                 .encr = {
@@ -186,7 +195,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "blowfish",
+        .name = "cbc(blowfish)",
+        .compat = "blowfish",
 
         .uinfo = {
                 .encr = {
@@ -203,7 +213,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "aes",
+        .name = "cbc(aes)",
+        .compat = "aes",
 
         .uinfo = {
                 .encr = {
@@ -220,7 +231,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "serpent",
+        .name = "cbc(serpent)",
+        .compat = "serpent",
 
         .uinfo = {
                 .encr = {
@@ -237,7 +249,8 @@ static struct xfrm_algo_desc ealg_list[] = {
         }
 },
 {
-        .name = "twofish",
+        .name = "cbc(twofish)",
+        .compat = "twofish",
 
         .uinfo = {
                 .encr = {
@@ -350,8 +363,8 @@ struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
 EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
 
 static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
-                                              int entries, char *name,
-                                              int probe)
+                                              int entries, u32 type, u32 mask,
+                                              char *name, int probe)
 {
         int i, status;
 
@@ -359,7 +372,8 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
                 return NULL;
 
         for (i = 0; i < entries; i++) {
-                if (strcmp(name, list[i].name))
+                if (strcmp(name, list[i].name) &&
+                    (!list[i].compat || strcmp(name, list[i].compat)))
                         continue;
 
                 if (list[i].available)
@@ -368,7 +382,7 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
                 if (!probe)
                         break;
 
-                status = crypto_alg_available(name, 0);
+                status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC);
                 if (!status)
                         break;
 
@@ -380,19 +394,25 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
 
 struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
 {
-        return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
+        return xfrm_get_byname(aalg_list, aalg_entries(),
+                               CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
+                               name, probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
 
 struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
 {
-        return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
+        return xfrm_get_byname(ealg_list, ealg_entries(),
+                               CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
+                               name, probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
 
 struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
 {
-        return xfrm_get_byname(calg_list, calg_entries(), name, probe);
+        return xfrm_get_byname(calg_list, calg_entries(),
+                               CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
+                               name, probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
 
@@ -427,19 +447,22 @@ void xfrm_probe_algs(void)
         BUG_ON(in_softirq());
 
         for (i = 0; i < aalg_entries(); i++) {
-                status = crypto_alg_available(aalg_list[i].name, 0);
+                status = crypto_has_hash(aalg_list[i].name, 0,
+                                         CRYPTO_ALG_ASYNC);
                 if (aalg_list[i].available != status)
                         aalg_list[i].available = status;
         }
 
         for (i = 0; i < ealg_entries(); i++) {
-                status = crypto_alg_available(ealg_list[i].name, 0);
+                status = crypto_has_blkcipher(ealg_list[i].name, 0,
+                                              CRYPTO_ALG_ASYNC);
                 if (ealg_list[i].available != status)
                         ealg_list[i].available = status;
         }
 
         for (i = 0; i < calg_entries(); i++) {
-                status = crypto_alg_available(calg_list[i].name, 0);
+                status = crypto_has_comp(calg_list[i].name, 0,
+                                         CRYPTO_ALG_ASYNC);
                 if (calg_list[i].available != status)
                         calg_list[i].available = status;
         }
@@ -471,11 +494,12 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
 
 /* Move to common area: it is shared with AH. */
 
-void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
-                  int offset, int len, icv_update_fn_t icv_update)
+int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
+                 int offset, int len, icv_update_fn_t icv_update)
 {
         int start = skb_headlen(skb);
         int i, copy = start - offset;
+        int err;
         struct scatterlist sg;
 
         /* Checksum header. */
@@ -487,10 +511,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
                 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                 sg.length = copy;
 
-                icv_update(tfm, &sg, 1);
+                err = icv_update(desc, &sg, copy);
+                if (unlikely(err))
+                        return err;
 
                 if ((len -= copy) == 0)
-                        return;
+                        return 0;
                 offset += copy;
         }
 
@@ -510,10 +536,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
                 sg.offset = frag->page_offset + offset-start;
                 sg.length = copy;
 
-                icv_update(tfm, &sg, 1);
+                err = icv_update(desc, &sg, copy);
+                if (unlikely(err))
+                        return err;
 
                 if (!(len -= copy))
-                        return;
+                        return 0;
                 offset += copy;
         }
         start = end;
@@ -531,15 +559,19 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
                 if ((copy = end - offset) > 0) {
                         if (copy > len)
                                 copy = len;
-                        skb_icv_walk(list, tfm, offset-start, copy, icv_update);
+                        err = skb_icv_walk(list, desc, offset-start,
+                                           copy, icv_update);
+                        if (unlikely(err))
+                                return err;
                         if ((len -= copy) == 0)
-                                return;
+                                return 0;
                         offset += copy;
                 }
                 start = end;
         }
         }
         BUG_ON(len);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(skb_icv_walk);
 
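Note: the xfrm changes pair each canonical template name with a .compat alias so existing userspace keeps resolving, and replace the deprecated crypto_alg_available() with the typed crypto_has_*() helpers, passing the same CRYPTO_ALG_ASYNC mask used throughout this series to filter out async-only implementations. A sketch of probing at the new granularity, under those assumed helpers; the algorithm names are examples only.

    #include <linux/crypto.h>
    #include <linux/kernel.h>

    static void example_probe(void)
    {
            /* May cause cryptomgr to instantiate the template on demand. */
            if (crypto_has_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC))
                    printk(KERN_DEBUG "hmac(sha1) available\n");

            if (crypto_has_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC))
                    printk(KERN_DEBUG "cbc(aes) available\n");

            if (crypto_has_comp("deflate", 0, CRYPTO_ALG_ASYNC))
                    printk(KERN_DEBUG "deflate available\n");
    }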
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3e6a722d072e..fa79ddc4239e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -10,6 +10,7 @@
  *
  */
 
+#include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -212,6 +213,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
                 return -ENOMEM;
 
         memcpy(p, ualg, len);
+        strcpy(p->alg_name, algo->name);
         *algpp = p;
         return 0;
 }
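Note: the single functional addition here, strcpy(p->alg_name, algo->name), normalizes whatever name userspace supplied: the lookup can match through the new .compat alias, and the installed state then carries the canonical template name. A sketch of the effect, using a hypothetical wrapper rather than the patch's own code.

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <net/xfrm.h>

    /* Hypothetical wrapper: legacy "des" from userspace ends up
     * stored as the canonical "cbc(des)". */
    static int example_normalize(struct xfrm_algo *p)
    {
            struct xfrm_algo_desc *algo;

            algo = xfrm_ealg_get_byname(p->alg_name, 1);
            if (!algo)
                    return -ENOSYS;
            strcpy(p->alg_name, algo->name);
            return 0;
    }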
diff --git a/security/seclvl.c b/security/seclvl.c
index c26dd7de0471..8f6291991fbc 100644
--- a/security/seclvl.c
+++ b/security/seclvl.c
@@ -16,6 +16,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -197,26 +198,27 @@ static unsigned char hashedPassword[SHA1_DIGEST_SIZE];
 static int
 plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len)
 {
-        struct crypto_tfm *tfm;
+        struct hash_desc desc;
         struct scatterlist sg;
+        int err;
+
         if (len > PAGE_SIZE) {
                 seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d "
                               "characters). Largest possible is %lu "
                               "bytes.\n", len, PAGE_SIZE);
                 return -EINVAL;
         }
-        tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
-        if (tfm == NULL) {
+        desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(desc.tfm)) {
                 seclvl_printk(0, KERN_ERR,
                               "Failed to load transform for SHA1\n");
                 return -EINVAL;
         }
         sg_init_one(&sg, (u8 *)plaintext, len);
-        crypto_digest_init(tfm);
-        crypto_digest_update(tfm, &sg, 1);
-        crypto_digest_final(tfm, hash);
-        crypto_free_tfm(tfm);
-        return 0;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+        err = crypto_hash_digest(&desc, &sg, len, hash);
+        crypto_free_hash(desc.tfm);
+        return err;
 }
 
 /**