Diffstat (limited to 'crypto/chainiv.c')
-rw-r--r--	crypto/chainiv.c	331
1 files changed, 331 insertions, 0 deletions
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
new file mode 100644
index 000000000000..d17fa0454dc3
--- /dev/null
+++ b/crypto/chainiv.c
@@ -0,0 +1,331 @@
/*
 * chainiv: Chain IV Generator
 *
 * Generate IVs simply by using the last block of the previous encryption.
 * This is mainly useful for CBC with a synchronous algorithm.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

enum {
	CHAINIV_STATE_INUSE = 0,
};

struct chainiv_ctx {
	spinlock_t lock;
	char iv[];
};

struct async_chainiv_ctx {
	unsigned long state;

	spinlock_t lock;
	int err;

	struct crypto_queue queue;
	struct work_struct postponed;

	char iv[];
};

static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}

static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	return chainiv_givencrypt(req);
}

static int chainiv_init_common(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}

static int chainiv_init(struct crypto_tfm *tfm)
{
	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	return chainiv_init_common(tfm);
}

static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;

	if (!ctx->queue.qlen) {
		smp_mb__before_clear_bit();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = schedule_work(&ctx->postponed);
	BUG_ON(!queued);

out:
	return ctx->err;
}

static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_bh(&ctx->lock);
	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
	spin_unlock_bh(&ctx->lock);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		return err;

	ctx->err = err;
	return async_chainiv_schedule_work(ctx);
}

static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	ctx->err = crypto_ablkcipher_encrypt(subreq);
	if (ctx->err)
		goto out;

	memcpy(ctx->iv, subreq->info, ivsize);

out:
	return async_chainiv_schedule_work(ctx);
}

static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto postpone;

	if (ctx->queue.qlen) {
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
		goto postpone;
	}

	return async_chainiv_givencrypt_tail(req);

postpone:
	return async_chainiv_postpone_request(req);
}

static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto out;

	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    async_chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));

unlock:
	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

out:
	return async_chainiv_givencrypt(req);
}

static void async_chainiv_do_postponed(struct work_struct *work)
{
	struct async_chainiv_ctx *ctx = container_of(work,
						     struct async_chainiv_ctx,
						     postponed);
	struct skcipher_givcrypt_request *req;
	struct ablkcipher_request *subreq;

	/* Only handle one request at a time to avoid hogging keventd. */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		async_chainiv_schedule_work(ctx);
		return;
	}

	subreq = skcipher_givcrypt_reqctx(req);
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	async_chainiv_givencrypt_tail(req);
}

static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}

static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}

static struct crypto_template chainiv_tmpl;

static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_ablkcipher.givencrypt =
			async_chainiv_givencrypt_first;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;
}

static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = skcipher_geniv_free,
	.module = THIS_MODULE,
};

static int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}

static void __exit chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}

module_init(chainiv_module_init);
module_exit(chainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");
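
The rule the file's header comment describes is small: hand out the current chain value as the request's IV, run the CBC encryption, then remember the final ciphertext block as the IV for the next request (the chain is seeded once from get_random_bytes()). The standalone sketch below illustrates that rule outside the kernel; it is a minimal illustration only, not part of the patch, and toy_block_encrypt()/toy_cbc_encrypt() are hypothetical stand-ins for a real cipher rather than kernel APIs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Hypothetical toy block cipher: XOR with a fixed key (illustration only). */
static void toy_block_encrypt(const uint8_t key[BLOCK_SIZE],
			      const uint8_t in[BLOCK_SIZE],
			      uint8_t out[BLOCK_SIZE])
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		out[i] = in[i] ^ key[i];
}

/* CBC encrypt; on return, iv holds the last ciphertext block. */
static void toy_cbc_encrypt(const uint8_t key[BLOCK_SIZE],
			    uint8_t iv[BLOCK_SIZE],
			    const uint8_t *src, uint8_t *dst, size_t nblocks)
{
	uint8_t blk[BLOCK_SIZE];

	for (size_t n = 0; n < nblocks; n++) {
		for (int i = 0; i < BLOCK_SIZE; i++)
			blk[i] = src[n * BLOCK_SIZE + i] ^ iv[i];
		toy_block_encrypt(key, blk, dst + n * BLOCK_SIZE);
		memcpy(iv, dst + n * BLOCK_SIZE, BLOCK_SIZE);
	}
}

int main(void)
{
	uint8_t key[BLOCK_SIZE] = { 0xaa };
	uint8_t chain_iv[BLOCK_SIZE] = { 0 };	/* chainiv seeds this with get_random_bytes() */
	uint8_t msg[2 * BLOCK_SIZE] = "two blocks of plaintext data..";
	uint8_t ct[2 * BLOCK_SIZE];

	for (int req = 0; req < 3; req++) {
		uint8_t giv[BLOCK_SIZE];

		/* Hand out the current chain value as this request's IV... */
		memcpy(giv, chain_iv, BLOCK_SIZE);
		toy_cbc_encrypt(key, chain_iv, msg, ct, 2);
		/* ...and chain_iv now equals the last ciphertext block. */
		printf("request %d: iv[0]=0x%02x, last ct byte=0x%02x\n",
		       req, giv[0], ct[2 * BLOCK_SIZE - 1]);
	}
	return 0;
}

From the second iteration onward the IV handed out equals the previous iteration's final ciphertext block, which is the same bookkeeping chainiv_givencrypt() performs with ctx->iv and subreq->info under ctx->lock.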