author		Andrzej Zaborowski <andrew.zaborowski@intel.com>	2015-12-05 11:09:34 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>		2015-12-09 07:03:57 -0500
commit		3d5b1ecdea6fb94f8c61554fcb2ba776a2d3d0e6 (patch)
tree		0566eca107c60adc4d962d84dac952ec793b49ef
parent		28a4618ad14cf17009a87d8b5718132a5d4ef852 (diff)
crypto: rsa - RSA padding algorithm
This patch adds PKCS#1 v1.5 standard RSA padding as a separate template. This way an RSA cipher with padding can be obtained by instantiating "pkcs1pad(rsa)". The reason for adding this is that RSA is almost never used without this padding (or OAEP), so it will be needed either for certificate work in the kernel or for userspace. I also hear that it is likely implemented by hardware RSA, in which case hardware implementations of the whole of pkcs1pad(rsa) can be provided.

Signed-off-by: Andrew Zaborowski <andrew.zaborowski@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
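[Editor's note: as a hedged illustration of how the new template is meant to be consumed, a kernel caller could instantiate "pkcs1pad(rsa)" through the standard akcipher API roughly as sketched below. This snippet is not part of the patch; the function name and parameters are hypothetical, and a real caller would handle -EINPROGRESS/-EBUSY from an asynchronous implementation via the request callback.]

#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch: encrypt src with a DER-encoded RSA public key. */
static int example_pkcs1pad_encrypt(const void *key, unsigned int keylen,
				    struct scatterlist *src, unsigned int src_len,
				    struct scatterlist *dst, unsigned int dst_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int err;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
	/* Simplified synchronous call; async completion is ignored here. */
	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}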
-rw-r--r--	crypto/Makefile				  1
-rw-r--r--	crypto/rsa-pkcs1pad.c			617
-rw-r--r--	crypto/rsa.c				 16
-rw-r--r--	include/crypto/internal/rsa.h		  2
4 files changed, 635 insertions, 1 deletion
diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba923458d..2acdbbd30475 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
 rsa_generic-y += rsaprivkey-asn1.o
 rsa_generic-y += rsa.o
 rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
 cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
new file mode 100644
index 000000000000..accc67d16686
--- /dev/null
+++ b/crypto/rsa-pkcs1pad.c
@@ -0,0 +1,617 @@
/*
 * RSA padding templates.
 *
 * Copyright (c) 2015 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

struct pkcs1pad_ctx {
	struct crypto_akcipher *child;

	unsigned int key_size;
};

struct pkcs1pad_request {
	struct akcipher_request child_req;

	struct scatterlist in_sg[3], out_sg[2];
	uint8_t *in_buf, *out_buf;
};

static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err, size;

	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);

	if (!err) {
		/* Find out new modulus size from rsa implementation */
		size = crypto_akcipher_maxsize(ctx->child);

		ctx->key_size = size > 0 ? size : 0;
		if (size <= 0)
			err = size;
	}

	return err;
}

static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err, size;

	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);

	if (!err) {
		/* Find out new modulus size from rsa implementation */
		size = crypto_akcipher_maxsize(ctx->child);

		ctx->key_size = size > 0 ? size : 0;
		if (size <= 0)
			err = size;
	}

	return err;
}

static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	/*
	 * The maximum destination buffer size for the encrypt/sign operations
	 * will be the same as for RSA, even though it's smaller for
	 * decrypt/verify.
	 */

	return ctx->key_size ?: -EINVAL;
}

static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
		struct scatterlist *next)
{
	int nsegs = next ? 1 : 0;

	if (offset_in_page(buf) + len <= PAGE_SIZE) {
		nsegs += 1;
		sg_init_table(sg, nsegs);
		sg_set_buf(sg, buf, len);
	} else {
		nsegs += 2;
		sg_init_table(sg, nsegs);
		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
				offset_in_page(buf) + len - PAGE_SIZE);
	}

	if (next)
		sg_chain(sg, nsegs, next);
}

static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len];

	if (!err) {
		if (req_ctx->child_req.dst_len < ctx->key_size) {
			memset(zeros, 0, sizeof(zeros));
			sg_copy_from_buffer(req->dst,
					sg_nents_for_len(req->dst,
						sizeof(zeros)),
					zeros, sizeof(zeros));
		}

		sg_pcopy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, ctx->key_size),
				req_ctx->out_buf, req_ctx->child_req.dst_len,
				sizeof(zeros));
	}
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_encrypt_sign_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req,
			pkcs1pad_encrypt_sign_complete(req, err));
}

static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	if (req_ctx->out_buf[0] != 0x02) {
		err = -EINVAL;
		goto done;
	}
	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] == 0x00)
			break;
	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
		err = -EINVAL;
		goto done;
	}
	pos++;

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_decrypt_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
}

static int pkcs1pad_decrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (!ctx->key_size || req->src_len != ctx->key_size)
		return -EINVAL;

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/* Reuse input buffer, output to a new buffer */
	req_ctx->child_req.src = req->src;
	req_ctx->child_req.src_len = req->src_len;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size - 1;

	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size - 1, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_decrypt_complete_cb, req);

	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_decrypt_complete(req, err);

	return err;
}

static int pkcs1pad_sign(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x01;
	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_sign(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	if (req_ctx->out_buf[0] != 0x01) {
		err = -EINVAL;
		goto done;
	}
	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] != 0xff)
			break;
	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
			req_ctx->out_buf[pos] != 0x00) {
		err = -EINVAL;
		goto done;
	}
	pos++;

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_verify_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
}

/*
 * The verify operation is here for completeness similar to the verification
 * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
 * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
 * retrieve the DigestInfo from a signature, instead the user is expected
 * to call the sign operation to generate the expected signature and compare
 * signatures instead of the message-digests.
 */
static int pkcs1pad_verify(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (!ctx->key_size || req->src_len != ctx->key_size)
		return -EINVAL;

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/* Reuse input buffer, output to a new buffer */
	req_ctx->child_req.src = req->src;
	req_ctx->child_req.src_len = req->src_len;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size - 1;

	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size - 1, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_verify_complete_cb, req);

	err = crypto_akcipher_verify(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_verify_complete(req, err);

	return err;
}

static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct crypto_akcipher *child_tfm;

	child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
	if (IS_ERR(child_tfm))
		return PTR_ERR(child_tfm);

	ctx->child = child_tfm;

	return 0;
}

static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->child);
}

static void pkcs1pad_free(struct akcipher_instance *inst)
{
	struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);

	crypto_drop_akcipher(spawn);

	kfree(inst);
}

static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct akcipher_instance *inst;
	struct crypto_akcipher_spawn *spawn;
	struct akcipher_alg *rsa_alg;
	const char *rsa_alg_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
		return -EINVAL;

	rsa_alg_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(rsa_alg_name))
		return PTR_ERR(rsa_alg_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = akcipher_instance_ctx(inst);
	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
			crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	rsa_alg = crypto_spawn_akcipher_alg(spawn);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name,
				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
				rsa_alg->base.cra_name) >=
			CRYPTO_MAX_ALG_NAME ||
			snprintf(inst->alg.base.cra_driver_name,
				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
				rsa_alg->base.cra_driver_name) >=
			CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);

	inst->alg.init = pkcs1pad_init_tfm;
	inst->alg.exit = pkcs1pad_exit_tfm;

	inst->alg.encrypt = pkcs1pad_encrypt;
	inst->alg.decrypt = pkcs1pad_decrypt;
	inst->alg.sign = pkcs1pad_sign;
	inst->alg.verify = pkcs1pad_verify;
	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
	inst->alg.max_size = pkcs1pad_get_max_size;
	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;

	inst->free = pkcs1pad_free;

	err = akcipher_register_instance(tmpl, inst);
	if (err)
		goto out_drop_alg;

	return 0;

out_drop_alg:
	crypto_drop_akcipher(spawn);
out_free_inst:
	kfree(inst);
	return err;
}

struct crypto_template rsa_pkcs1pad_tmpl = {
	.name = "pkcs1pad",
	.create = pkcs1pad_create,
	.module = THIS_MODULE,
};
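[Editor's note: for reference only, not part of the patch. The buffers assembled in pkcs1pad_encrypt() and pkcs1pad_sign() above correspond to the RFC 2313 encryption blocks, summarized here in comment form:]

/*
 * Block type 2 (encrypt): EB = 00 || 02 || PS (>= 8 random nonzero octets) || 00 || M
 * Block type 1 (sign):    EB = 00 || 01 || PS (>= 8 octets of 0xff)        || 00 || D
 *
 * The leading 00 octet is implicit in the code above: the child RSA request
 * uses key_size - 1 input bytes, so only the octets after it are assembled.
 * Requiring PS to be at least 8 octets is what limits src_len to
 * key_size - 11 and drives the "pos < 9" checks on the unpadding side.
 */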
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 58aad69a490c..77d737f52147 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -13,6 +13,7 @@
 #include <crypto/internal/rsa.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
+#include <crypto/algapi.h>
 
 /*
  * RSAEP function [RFC3447 sec 5.1.1]
@@ -315,11 +316,24 @@ static struct akcipher_alg rsa = {
 
 static int rsa_init(void)
 {
-	return crypto_register_akcipher(&rsa);
+	int err;
+
+	err = crypto_register_akcipher(&rsa);
+	if (err)
+		return err;
+
+	err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+	if (err) {
+		crypto_unregister_akcipher(&rsa);
+		return err;
+	}
+
+	return 0;
 }
 
 static void rsa_exit(void)
 {
+	crypto_unregister_template(&rsa_pkcs1pad_tmpl);
 	crypto_unregister_akcipher(&rsa);
 }
 
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index f997e2d29b5a..c7585bdecbc2 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
 			unsigned int key_len);
 
 void rsa_free_key(struct rsa_key *rsa_key);
+
+extern struct crypto_template rsa_pkcs1pad_tmpl;
 #endif