author	Rik Snel <rsnel@cube.dyndns.org>	2006-11-25 17:43:10 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-06 21:38:56 -0500
commit	64470f1b8510699dc357a44004dc924bc139c917 (patch)
tree	188d414266091c2220bae155651b2aacc2c6b9aa
parent	c494e0705d670c51ac736c8c4d92750705fe3187 (diff)
[CRYPTO] lrw: Liskov Rivest Wagner, a tweakable narrow block cipher mode
Main module, this implements the Liskov Rivest Wagner block cipher mode
in the new blockcipher API.  The implementation is based on ecb.c.

The LRW-32-AES specification I used can be found at:
http://grouper.ieee.org/groups/1619/email/pdf00017.pdf

It implements the optimization specified as optional in the specification,
and in addition it uses optimized multiplication routines from gf128mul.c.

Since gf128mul.[ch] is not tested on bigendian, this cipher mode may
currently fail badly on bigendian machines.

Signed-off-by: Rik Snel <rsnel@cube.dyndns.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
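For orientation (not part of the patch), here is a minimal sketch of how a
kernel-side caller could drive the resulting lrw(aes) template through the
blkcipher API this patch builds on.  The function name, key and IV values are
made up for illustration, and error handling is trimmed to the essentials:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical helper, for illustration only: encrypt one 16-byte block
 * with lrw(aes).  A 48-byte (384-bit) key is Key1 (256-bit AES key)
 * followed by Key2 (128-bit tweak key); the 16-byte IV is the big-endian
 * index of the first block (1 is used here, since an index of 0 would
 * make the first tweak zero). */
static int lrw_demo(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[48] = { 0x01 };		/* Key1 || Key2, dummy values */
	u8 iv[16] = { [15] = 1 };	/* block index 1 */
	u8 buf[16] = { 0 };		/* one block of plaintext */
	int err;

	tfm = crypto_alloc_blkcipher("lrw(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, sizeof(buf));
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
out:
	crypto_free_blkcipher(tfm);
	return err;
}

The 48-byte key layout and the IV-as-block-index convention correspond to the
setkey() and crypt() code in crypto/lrw.c below.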
-rw-r--r--	crypto/Kconfig	13
-rw-r--r--	crypto/Makefile	1
-rw-r--r--	crypto/lrw.c	301
3 files changed, 315 insertions(+), 0 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f941ffb2a087..92ba249f3a5b 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -168,6 +168,19 @@ config CRYPTO_CBC
 	  CBC: Cipher Block Chaining mode
 	  This block cipher algorithm is required for IPSec.
 
+config CRYPTO_LRW
+	tristate "LRW support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
+	select CRYPTO_GF128MUL
+	help
+	  LRW: Liskov Rivest Wagner, a tweakable, non-malleable, non-movable
+	  narrow block cipher mode for dm-crypt.  Use it with the cipher
+	  specification string aes-lrw-benbi; the key must be 256, 320 or
+	  384 bits.  The first 128, 192 or 256 bits in the key are used for
+	  AES and the rest ties each cipher block to its logical position.
+
 config CRYPTO_DES
 	tristate "DES and Triple DES EDE cipher algorithms"
 	select CRYPTO_ALGAPI
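To make the key sizes in the help text concrete (a worked example, not part of
the patch): the lrw template always appends one 16-byte (128-bit) tweak key to
the underlying cipher's own key, so for AES the valid key lengths are

	128 + 128 = 256 bits
	192 + 128 = 320 bits
	256 + 128 = 384 bits

which is where "256, 320 or 384" comes from; alloc() in crypto/lrw.c below
derives this generically as cia_min_keysize/cia_max_keysize + cra_blocksize.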
diff --git a/crypto/Makefile b/crypto/Makefile
index 0ab9ff045e9a..60e3d24f61f5 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
 obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_LRW) += lrw.o
 obj-$(CONFIG_CRYPTO_DES) += des.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
diff --git a/crypto/lrw.c b/crypto/lrw.c
new file mode 100644
index 000000000000..5d043158b903
--- /dev/null
+++ b/crypto/lrw.c
@@ -0,0 +1,301 @@
/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct priv {
	struct crypto_cipher *child;
	/* optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2; we could also have
	 * used 4k optimization tables or no optimization at all.  In
	 * the latter case we would have to store key2 here. */
	struct gf128mul_64k *table;
	/* stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2 */
	be128 mulinc[128];
};

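/*
 * Tweak values are 128-bit big-endian ("bbe") quantities stored in 16
 * bytes.  setbit128_bbe(b, i) sets the bit of numeric weight 2^i in such
 * a value: __set_bit() on a little-endian machine addresses bit i%8 of
 * byte i/8, so xoring the index with 0x78 mirrors the byte offset (byte n
 * becomes byte 15 - n) while keeping the position inside the byte.  This
 * byte-array view of __set_bit() is little-endian specific, hence the
 * big-endian caveat in the patch description.
 */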
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ 0x78, b);
}

static int setkey(struct crypto_tfm *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err, i;
	be128 tmp = { 0 };
	int bsize = crypto_cipher_blocksize(child);

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	if ((err = crypto_cipher_setkey(child, key, keylen - bsize)))
		return err;
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize));
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

struct sinfo {
	be128 t;
	struct crypto_tfm *tfm;
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

static inline void inc(be128 *iv)
{
	if (!(iv->b = cpu_to_be64(be64_to_cpu(iv->b) + 1)))
		iv->a = cpu_to_be64(be64_to_cpu(iv->a) + 1);
}

static inline void round(struct sinfo *s, void *dst, const void *src)
{
	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
	s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	return x;
}
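
/*
 * How the mulinc[] table is used in crypt() below: with I the 128-bit
 * block index and T = I*Key2, stepping to the next block gives
 *
 *	(I+1)*Key2 = I*Key2 xor (I xor (I+1))*Key2
 *
 * and I xor (I+1) is a run of (n+1) low-order 1 bits, where n is the
 * number of trailing 1 bits of I, i.e. get_index128(I).  These products
 * are exactly the values precomputed in mulinc[], so each subsequent
 * tweak costs one table lookup and one xor instead of a full GF(2^128)
 * multiplication.
 */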

static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = crypto_cipher_blocksize(ctx->child);
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	be128 *iv;
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!(avail = w->nbytes))
		return err;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)w->iv;
	s.t = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&s.t, ctx->table);

	goto first;

	for (;;) {
		do {
			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
			inc(iv);

first:
			round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!(avail = w->nbytes))
			break;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	tfm = crypto_spawn_tfm(spawn);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	if (crypto_tfm_alg_blocksize(tfm) != 16) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	ctx->child = crypto_cipher_cast(tfm);
	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_cipher(ctx->child);
}

static struct crypto_instance *alloc(void *param, unsigned int len)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));

	inst = crypto_alloc_instance("lrw", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
	else inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	if (!(alg->cra_blocksize % 4))
		inst->alg.cra_alignmask |= 3;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
	inst->alg.cra_blkcipher.max_keysize =
		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.alloc = alloc,
	.free = free,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");