summaryrefslogtreecommitdiffstats
path: root/crypto/skcipher.c
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2016-11-22 07:08:12 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2016-11-28 08:23:17 -0500
commitb286d8b1a690667e99a89d22245832b6898c6279 (patch)
tree45122c84a2ef38a1f2010db2a63a6823f796166b /crypto/skcipher.c
parent7cf31864e60f7f6ee6ad2e3ea1f3bae8844c0380 (diff)
crypto: skcipher - Add skcipher walk interface
This patch adds the skcipher walk interface which replaces both blkcipher walk and ablkcipher walk. Just like blkcipher walk it can also be used for AEAD algorithms. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto/skcipher.c')
-rw-r--r--crypto/skcipher.c511
1 file changed, 511 insertions, 0 deletions
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018dcaee..0f3071991b13 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -14,9 +14,12 @@
14 * 14 *
15 */ 15 */
16 16
17#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h> 18#include <crypto/internal/skcipher.h>
19#include <crypto/scatterwalk.h>
18#include <linux/bug.h> 20#include <linux/bug.h>
19#include <linux/cryptouser.h> 21#include <linux/cryptouser.h>
22#include <linux/list.h>
20#include <linux/module.h> 23#include <linux/module.h>
21#include <linux/rtnetlink.h> 24#include <linux/rtnetlink.h>
22#include <linux/seq_file.h> 25#include <linux/seq_file.h>
@@ -24,6 +27,514 @@
24 27
25#include "internal.h" 28#include "internal.h"
26 29
30enum {
31 SKCIPHER_WALK_PHYS = 1 << 0,
32 SKCIPHER_WALK_SLOW = 1 << 1,
33 SKCIPHER_WALK_COPY = 1 << 2,
34 SKCIPHER_WALK_DIFF = 1 << 3,
35 SKCIPHER_WALK_SLEEP = 1 << 4,
36};
37
38struct skcipher_walk_buffer {
39 struct list_head entry;
40 struct scatter_walk dst;
41 unsigned int len;
42 u8 *data;
43 u8 buffer[];
44};
45
46static int skcipher_walk_next(struct skcipher_walk *walk);
47
48static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
49{
50 if (PageHighMem(scatterwalk_page(walk)))
51 kunmap_atomic(vaddr);
52}
53
54static inline void *skcipher_map(struct scatter_walk *walk)
55{
56 struct page *page = scatterwalk_page(walk);
57
58 return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
59 offset_in_page(walk->offset);
60}
61
62static inline void skcipher_map_src(struct skcipher_walk *walk)
63{
64 walk->src.virt.addr = skcipher_map(&walk->in);
65}
66
67static inline void skcipher_map_dst(struct skcipher_walk *walk)
68{
69 walk->dst.virt.addr = skcipher_map(&walk->out);
70}
71
72static inline void skcipher_unmap_src(struct skcipher_walk *walk)
73{
74 skcipher_unmap(&walk->in, walk->src.virt.addr);
75}
76
77static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
78{
79 skcipher_unmap(&walk->out, walk->dst.virt.addr);
80}
81
82static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
83{
84 return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
85}
86
87/* Get a spot of the specified length that does not straddle a page.
88 * The caller needs to ensure that there is enough space for this operation.
89 */
90static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
91{
92 u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
93
94 return max(start, end_page);
95}
96
97static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
98{
99 u8 *addr;
100
101 addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
102 addr = skcipher_get_spot(addr, bsize);
103 scatterwalk_copychunks(addr, &walk->out, bsize,
104 (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
105 return 0;
106}
107
108int skcipher_walk_done(struct skcipher_walk *walk, int err)
109{
110 unsigned int n = walk->nbytes - err;
111 unsigned int nbytes;
112
113 nbytes = walk->total - n;
114
115 if (unlikely(err < 0)) {
116 nbytes = 0;
117 n = 0;
118 } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
119 SKCIPHER_WALK_SLOW |
120 SKCIPHER_WALK_COPY |
121 SKCIPHER_WALK_DIFF)))) {
122unmap_src:
123 skcipher_unmap_src(walk);
124 } else if (walk->flags & SKCIPHER_WALK_DIFF) {
125 skcipher_unmap_dst(walk);
126 goto unmap_src;
127 } else if (walk->flags & SKCIPHER_WALK_COPY) {
128 skcipher_map_dst(walk);
129 memcpy(walk->dst.virt.addr, walk->page, n);
130 skcipher_unmap_dst(walk);
131 } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
132 if (WARN_ON(err)) {
133 err = -EINVAL;
134 nbytes = 0;
135 } else
136 n = skcipher_done_slow(walk, n);
137 }
138
139 if (err > 0)
140 err = 0;
141
142 walk->total = nbytes;
143 walk->nbytes = nbytes;
144
145 scatterwalk_advance(&walk->in, n);
146 scatterwalk_advance(&walk->out, n);
147 scatterwalk_done(&walk->in, 0, nbytes);
148 scatterwalk_done(&walk->out, 1, nbytes);
149
150 if (nbytes) {
151 crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
152 CRYPTO_TFM_REQ_MAY_SLEEP : 0);
153 return skcipher_walk_next(walk);
154 }
155
156 /* Short-circuit for the common/fast path. */
157 if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
158 goto out;
159
160 if (walk->flags & SKCIPHER_WALK_PHYS)
161 goto out;
162
163 if (walk->iv != walk->oiv)
164 memcpy(walk->oiv, walk->iv, walk->ivsize);
165 if (walk->buffer != walk->page)
166 kfree(walk->buffer);
167 if (walk->page)
168 free_page((unsigned long)walk->page);
169
170out:
171 return err;
172}
173EXPORT_SYMBOL_GPL(skcipher_walk_done);
174
175void skcipher_walk_complete(struct skcipher_walk *walk, int err)
176{
177 struct skcipher_walk_buffer *p, *tmp;
178
179 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
180 u8 *data;
181
182 if (err)
183 goto done;
184
185 data = p->data;
186 if (!data) {
187 data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
188 data = skcipher_get_spot(data, walk->chunksize);
189 }
190
191 scatterwalk_copychunks(data, &p->dst, p->len, 1);
192
193 if (offset_in_page(p->data) + p->len + walk->chunksize >
194 PAGE_SIZE)
195 free_page((unsigned long)p->data);
196
197done:
198 list_del(&p->entry);
199 kfree(p);
200 }
201
202 if (!err && walk->iv != walk->oiv)
203 memcpy(walk->oiv, walk->iv, walk->ivsize);
204 if (walk->buffer != walk->page)
205 kfree(walk->buffer);
206 if (walk->page)
207 free_page((unsigned long)walk->page);
208}
209EXPORT_SYMBOL_GPL(skcipher_walk_complete);
210
211static void skcipher_queue_write(struct skcipher_walk *walk,
212 struct skcipher_walk_buffer *p)
213{
214 p->dst = walk->out;
215 list_add_tail(&p->entry, &walk->buffers);
216}
217
218static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
219{
220 bool phys = walk->flags & SKCIPHER_WALK_PHYS;
221 unsigned alignmask = walk->alignmask;
222 struct skcipher_walk_buffer *p;
223 unsigned a;
224 unsigned n;
225 u8 *buffer;
226 void *v;
227
228 if (!phys) {
229 buffer = walk->buffer ?: walk->page;
230 if (buffer)
231 goto ok;
232 }
233
234 /* Start with the minimum alignment of kmalloc. */
235 a = crypto_tfm_ctx_alignment() - 1;
236 n = bsize;
237
238 if (phys) {
239 /* Calculate the minimum alignment of p->buffer. */
240 a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
241 n += sizeof(*p);
242 }
243
244 /* Minimum size to align p->buffer by alignmask. */
245 n += alignmask & ~a;
246
247 /* Minimum size to ensure p->buffer does not straddle a page. */
248 n += (bsize - 1) & ~(alignmask | a);
249
250 v = kzalloc(n, skcipher_walk_gfp(walk));
251 if (!v)
252 return skcipher_walk_done(walk, -ENOMEM);
253
254 if (phys) {
255 p = v;
256 p->len = bsize;
257 skcipher_queue_write(walk, p);
258 buffer = p->buffer;
259 } else {
260 walk->buffer = v;
261 buffer = v;
262 }
263
264ok:
265 walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
266 walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
267 walk->src.virt.addr = walk->dst.virt.addr;
268
269 scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
270
271 walk->nbytes = bsize;
272 walk->flags |= SKCIPHER_WALK_SLOW;
273
274 return 0;
275}
276
277static int skcipher_next_copy(struct skcipher_walk *walk)
278{
279 struct skcipher_walk_buffer *p;
280 u8 *tmp = walk->page;
281
282 skcipher_map_src(walk);
283 memcpy(tmp, walk->src.virt.addr, walk->nbytes);
284 skcipher_unmap_src(walk);
285
286 walk->src.virt.addr = tmp;
287 walk->dst.virt.addr = tmp;
288
289 if (!(walk->flags & SKCIPHER_WALK_PHYS))
290 return 0;
291
292 p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
293 if (!p)
294 return -ENOMEM;
295
296 p->data = walk->page;
297 p->len = walk->nbytes;
298 skcipher_queue_write(walk, p);
299
300 if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
301 PAGE_SIZE)
302 walk->page = NULL;
303 else
304 walk->page += walk->nbytes;
305
306 return 0;
307}
308
309static int skcipher_next_fast(struct skcipher_walk *walk)
310{
311 unsigned long diff;
312
313 walk->src.phys.page = scatterwalk_page(&walk->in);
314 walk->src.phys.offset = offset_in_page(walk->in.offset);
315 walk->dst.phys.page = scatterwalk_page(&walk->out);
316 walk->dst.phys.offset = offset_in_page(walk->out.offset);
317
318 if (walk->flags & SKCIPHER_WALK_PHYS)
319 return 0;
320
321 diff = walk->src.phys.offset - walk->dst.phys.offset;
322 diff |= walk->src.virt.page - walk->dst.virt.page;
323
324 skcipher_map_src(walk);
325 walk->dst.virt.addr = walk->src.virt.addr;
326
327 if (diff) {
328 walk->flags |= SKCIPHER_WALK_DIFF;
329 skcipher_map_dst(walk);
330 }
331
332 return 0;
333}
334
335static int skcipher_walk_next(struct skcipher_walk *walk)
336{
337 unsigned int bsize;
338 unsigned int n;
339 int err;
340
341 walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
342 SKCIPHER_WALK_DIFF);
343
344 n = walk->total;
345 bsize = min(walk->chunksize, max(n, walk->blocksize));
346 n = scatterwalk_clamp(&walk->in, n);
347 n = scatterwalk_clamp(&walk->out, n);
348
349 if (unlikely(n < bsize)) {
350 if (unlikely(walk->total < walk->blocksize))
351 return skcipher_walk_done(walk, -EINVAL);
352
353slow_path:
354 err = skcipher_next_slow(walk, bsize);
355 goto set_phys_lowmem;
356 }
357
358 if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
359 if (!walk->page) {
360 gfp_t gfp = skcipher_walk_gfp(walk);
361
362 walk->page = (void *)__get_free_page(gfp);
363 if (!walk->page)
364 goto slow_path;
365 }
366
367 walk->nbytes = min_t(unsigned, n,
368 PAGE_SIZE - offset_in_page(walk->page));
369 walk->flags |= SKCIPHER_WALK_COPY;
370 err = skcipher_next_copy(walk);
371 goto set_phys_lowmem;
372 }
373
374 walk->nbytes = n;
375
376 return skcipher_next_fast(walk);
377
378set_phys_lowmem:
379 if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
380 walk->src.phys.page = virt_to_page(walk->src.virt.addr);
381 walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
382 walk->src.phys.offset &= PAGE_SIZE - 1;
383 walk->dst.phys.offset &= PAGE_SIZE - 1;
384 }
385 return err;
386}
387EXPORT_SYMBOL_GPL(skcipher_walk_next);
388
389static int skcipher_copy_iv(struct skcipher_walk *walk)
390{
391 unsigned a = crypto_tfm_ctx_alignment() - 1;
392 unsigned alignmask = walk->alignmask;
393 unsigned ivsize = walk->ivsize;
394 unsigned bs = walk->chunksize;
395 unsigned aligned_bs;
396 unsigned size;
397 u8 *iv;
398
399 aligned_bs = ALIGN(bs, alignmask);
400
401 /* Minimum size to align buffer by alignmask. */
402 size = alignmask & ~a;
403
404 if (walk->flags & SKCIPHER_WALK_PHYS)
405 size += ivsize;
406 else {
407 size += aligned_bs + ivsize;
408
409 /* Minimum size to ensure buffer does not straddle a page. */
410 size += (bs - 1) & ~(alignmask | a);
411 }
412
413 walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
414 if (!walk->buffer)
415 return -ENOMEM;
416
417 iv = PTR_ALIGN(walk->buffer, alignmask + 1);
418 iv = skcipher_get_spot(iv, bs) + aligned_bs;
419
420 walk->iv = memcpy(iv, walk->iv, walk->ivsize);
421 return 0;
422}
423
424static int skcipher_walk_first(struct skcipher_walk *walk)
425{
426 walk->nbytes = 0;
427
428 if (WARN_ON_ONCE(in_irq()))
429 return -EDEADLK;
430
431 if (unlikely(!walk->total))
432 return 0;
433
434 walk->buffer = NULL;
435 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
436 int err = skcipher_copy_iv(walk);
437 if (err)
438 return err;
439 }
440
441 walk->page = NULL;
442 walk->nbytes = walk->total;
443
444 return skcipher_walk_next(walk);
445}
446
447static int skcipher_walk_skcipher(struct skcipher_walk *walk,
448 struct skcipher_request *req)
449{
450 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
451
452 scatterwalk_start(&walk->in, req->src);
453 scatterwalk_start(&walk->out, req->dst);
454
455 walk->total = req->cryptlen;
456 walk->iv = req->iv;
457 walk->oiv = req->iv;
458
459 walk->flags &= ~SKCIPHER_WALK_SLEEP;
460 walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
461 SKCIPHER_WALK_SLEEP : 0;
462
463 walk->blocksize = crypto_skcipher_blocksize(tfm);
464 walk->chunksize = crypto_skcipher_chunksize(tfm);
465 walk->ivsize = crypto_skcipher_ivsize(tfm);
466 walk->alignmask = crypto_skcipher_alignmask(tfm);
467
468 return skcipher_walk_first(walk);
469}
470
471int skcipher_walk_virt(struct skcipher_walk *walk,
472 struct skcipher_request *req, bool atomic)
473{
474 int err;
475
476 walk->flags &= ~SKCIPHER_WALK_PHYS;
477
478 err = skcipher_walk_skcipher(walk, req);
479
480 walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
481
482 return err;
483}
484EXPORT_SYMBOL_GPL(skcipher_walk_virt);
485
486void skcipher_walk_atomise(struct skcipher_walk *walk)
487{
488 walk->flags &= ~SKCIPHER_WALK_SLEEP;
489}
490EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
491
492int skcipher_walk_async(struct skcipher_walk *walk,
493 struct skcipher_request *req)
494{
495 walk->flags |= SKCIPHER_WALK_PHYS;
496
497 INIT_LIST_HEAD(&walk->buffers);
498
499 return skcipher_walk_skcipher(walk, req);
500}
501EXPORT_SYMBOL_GPL(skcipher_walk_async);
502
503int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
504 bool atomic)
505{
506 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
507 int err;
508
509 scatterwalk_start(&walk->in, req->src);
510 scatterwalk_start(&walk->out, req->dst);
511
512 scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
513 scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
514
515 walk->total = req->cryptlen;
516 walk->iv = req->iv;
517 walk->oiv = req->iv;
518
519 if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
520 walk->flags |= SKCIPHER_WALK_SLEEP;
521 else
522 walk->flags &= ~SKCIPHER_WALK_SLEEP;
523
524 walk->blocksize = crypto_aead_blocksize(tfm);
525 walk->chunksize = crypto_aead_chunksize(tfm);
526 walk->ivsize = crypto_aead_ivsize(tfm);
527 walk->alignmask = crypto_aead_alignmask(tfm);
528
529 err = skcipher_walk_first(walk);
530
531 if (atomic)
532 walk->flags &= ~SKCIPHER_WALK_SLEEP;
533
534 return err;
535}
536EXPORT_SYMBOL_GPL(skcipher_walk_aead);
537
27static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) 538static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
28{ 539{
29 if (alg->cra_type == &crypto_blkcipher_type) 540 if (alg->cra_type == &crypto_blkcipher_type)