author    Tadeusz Struk <tadeusz.struk@intel.com>    2015-10-08 12:26:55 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>   2015-10-14 10:23:16 -0400
commit    22287b0b5988b603b5f0daa282c89aaf2b877313 (patch)
tree      c8a1d714f4184feafc2885ab4a2968451397da03 /drivers/crypto
parent    2d4d1eea540b27c72488fd1914674c42473d53df (diff)
crypto: akcipher - Changes to asymmetric key API
The setkey function has been split into set_priv_key and set_pub_key. Akcipher
requests now take an sgl for src and dst instead of void *. Users of the API,
i.e. the two existing RSA implementations and the testmgr code, have been
updated accordingly.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
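For orientation, below is a minimal sketch of what a caller of the reworked
akcipher API looks like after this split. It is not part of the patch; the
algorithm name "rsa", the example_rsa_encrypt() wrapper, and the synchronous
error handling are illustrative assumptions only.

/* Illustrative sketch only -- not from this commit.
 * Shows the set_pub_key + scatterlist-based request flow that this API
 * change introduces.  The key blob is expected in the BER-encoded form
 * the implementation's decoder understands.
 */
#include <crypto/akcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_rsa_encrypt(const void *pubkey, unsigned int keylen,
                               void *msg, unsigned int msg_len,
                               void *out, unsigned int out_len)
{
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        struct scatterlist src, dst;
        int ret;

        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Keys are now set via set_pub_key/set_priv_key, not setkey(). */
        ret = crypto_akcipher_set_pub_key(tfm, pubkey, keylen);
        if (ret)
                goto free_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        /* src and dst are scatterlists now, not plain void * buffers. */
        sg_init_one(&src, msg, msg_len);
        sg_init_one(&dst, out, out_len);
        akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);

        ret = crypto_akcipher_encrypt(req);

        akcipher_request_free(req);
free_tfm:
        crypto_free_akcipher(tfm);
        return ret;
}

A real caller would additionally handle asynchronous completion
(-EINPROGRESS/-EBUSY) through akcipher_request_set_callback() instead of
treating crypto_akcipher_encrypt() as synchronous.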
Diffstat (limited to 'drivers/crypto')
-rw-r--r--   drivers/crypto/qat/qat_common/Makefile            |  12
-rw-r--r--   drivers/crypto/qat/qat_common/qat_asym_algs.c     | 213
-rw-r--r--   drivers/crypto/qat/qat_common/qat_rsakey.asn1     |   5
-rw-r--r--   drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 |  11
-rw-r--r--   drivers/crypto/qat/qat_common/qat_rsapubkey.asn1  |   4
5 files changed, 182 insertions(+), 63 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index df20a9de1c58..9e9e196c6d51 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,5 +1,10 @@
-$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
-clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h
+$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
+                             $(obj)/qat_rsapubkey-asn1.h
+$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
+                              $(obj)/qat_rsaprivkey-asn1.h
+
+clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
+clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h
 
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
@@ -13,7 +18,8 @@ intel_qat-objs := adf_cfg.o \
        adf_hw_arbiter.o \
        qat_crypto.o \
        qat_algs.o \
-       qat_rsakey-asn1.o \
+       qat_rsapubkey-asn1.o \
+       qat_rsaprivkey-asn1.o \
        qat_asym_algs.o \
        qat_uclo.o \
        qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index e87f51023ba4..51c594fdacdc 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -51,7 +51,9 @@
 #include <crypto/akcipher.h>
 #include <linux/dma-mapping.h>
 #include <linux/fips.h>
-#include "qat_rsakey-asn1.h"
+#include <crypto/scatterwalk.h>
+#include "qat_rsapubkey-asn1.h"
+#include "qat_rsaprivkey-asn1.h"
 #include "icp_qat_fw_pke.h"
 #include "adf_accel_devices.h"
 #include "adf_transport.h"
@@ -106,6 +108,7 @@ struct qat_rsa_request {
        dma_addr_t phy_in;
        dma_addr_t phy_out;
        char *src_align;
+       char *dst_align;
        struct icp_qat_fw_pke_request req;
        struct qat_rsa_ctx *ctx;
        int err;
@@ -118,7 +121,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
        struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
        int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                                resp->pke_resp_hdr.comn_resp_flags);
-       char *ptr = areq->dst;
 
        err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
@@ -129,24 +131,44 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
        dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
                         DMA_TO_DEVICE);
 
-       dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
-                        DMA_FROM_DEVICE);
+       areq->dst_len = req->ctx->key_sz;
+       if (req->dst_align) {
+               char *ptr = req->dst_align;
+
+               while (!(*ptr) && areq->dst_len) {
+                       areq->dst_len--;
+                       ptr++;
+               }
+
+               if (areq->dst_len != req->ctx->key_sz)
+                       memmove(req->dst_align, ptr, areq->dst_len);
+
+               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+                                        areq->dst_len, 1);
+
+               dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
+                                 req->out.enc.c);
+       } else {
+               char *ptr = sg_virt(areq->dst);
+
+               while (!(*ptr) && areq->dst_len) {
+                       areq->dst_len--;
+                       ptr++;
+               }
+
+               if (sg_virt(areq->dst) != ptr && areq->dst_len)
+                       memmove(sg_virt(areq->dst), ptr, areq->dst_len);
+
+               dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+                                DMA_FROM_DEVICE);
+       }
+
        dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, req->phy_out,
                         sizeof(struct qat_rsa_output_params),
                         DMA_TO_DEVICE);
 
-       areq->dst_len = req->ctx->key_sz;
-       /* Need to set the corect length of the output */
-       while (!(*ptr) && areq->dst_len) {
-               areq->dst_len--;
-               ptr++;
-       }
-
-       if (areq->dst_len != req->ctx->key_sz)
-               memmove(areq->dst, ptr, areq->dst_len);
-
        akcipher_request_complete(areq, err);
 }
 
@@ -255,8 +277,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
         * same as modulo n so in case it is different we need to allocate a
         * new buf and copy src data.
         * In other case we just need to map the user provided buffer.
+        * Also need to make sure that it is in contiguous buffer.
         */
-       if (req->src_len < ctx->key_sz) {
+       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+               qat_req->src_align = NULL;
+               qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
+                                                  req->src_len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
+                       return ret;
+
+       } else {
                int shift = ctx->key_sz - req->src_len;
 
                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
@@ -265,29 +295,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
                if (unlikely(!qat_req->src_align))
                        return ret;
 
-               memcpy(qat_req->src_align + shift, req->src, req->src_len);
+               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+                                        0, req->src_len, 0);
+       }
+       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+               qat_req->dst_align = NULL;
+               qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
+                                                   req->dst_len,
+                                                   DMA_FROM_DEVICE);
+
+               if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
+                       goto unmap_src;
+
        } else {
-               qat_req->src_align = NULL;
-               qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
-                                                  DMA_TO_DEVICE);
+               qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->out.enc.c,
+                                                        GFP_KERNEL);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+
        }
        qat_req->in.in_tab[3] = 0;
-       qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
-                                           DMA_FROM_DEVICE);
        qat_req->out.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
        qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
-
-       if (unlikely((!qat_req->src_align &&
-                     dma_mapping_error(dev, qat_req->in.enc.m)) ||
-                    dma_mapping_error(dev, qat_req->out.enc.c) ||
-                    dma_mapping_error(dev, qat_req->phy_in) ||
-                    dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap;
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
 
        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
@@ -300,7 +340,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 
        if (!ret)
                return -EINPROGRESS;
-unmap:
+unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.enc.m);
@@ -308,9 +348,15 @@ unmap:
                if (!dma_mapping_error(dev, qat_req->in.enc.m))
                        dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
                                         DMA_TO_DEVICE);
-       if (!dma_mapping_error(dev, qat_req->out.enc.c))
-               dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
-                                DMA_FROM_DEVICE);
+unmap_dst:
+       if (qat_req->dst_align)
+               dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+                                 qat_req->out.enc.c);
+       else
+               if (!dma_mapping_error(dev, qat_req->out.enc.c))
+                       dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+                                        DMA_FROM_DEVICE);
+unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
@@ -362,8 +408,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
         * same as modulo n so in case it is different we need to allocate a
         * new buf and copy src data.
         * In other case we just need to map the user provided buffer.
+        * Also need to make sure that it is in contiguous buffer.
         */
-       if (req->src_len < ctx->key_sz) {
+       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+               qat_req->src_align = NULL;
+               qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
+                                                  req->dst_len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
+                       return ret;
+
+       } else {
                int shift = ctx->key_sz - req->src_len;
 
                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
@@ -372,29 +426,40 @@ static int qat_rsa_dec(struct akcipher_request *req)
                if (unlikely(!qat_req->src_align))
                        return ret;
 
-               memcpy(qat_req->src_align + shift, req->src, req->src_len);
+               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+                                        0, req->src_len, 0);
+       }
+       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+               qat_req->dst_align = NULL;
+               qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
+                                                   req->dst_len,
+                                                   DMA_FROM_DEVICE);
+
+               if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
+                       goto unmap_src;
+
        } else {
-               qat_req->src_align = NULL;
-               qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
-                                                  DMA_TO_DEVICE);
+               qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->out.dec.m,
+                                                        GFP_KERNEL);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+
        }
+
        qat_req->in.in_tab[3] = 0;
-       qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
-                                           DMA_FROM_DEVICE);
        qat_req->out.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
        qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
-
-       if (unlikely((!qat_req->src_align &&
-                     dma_mapping_error(dev, qat_req->in.dec.c)) ||
-                    dma_mapping_error(dev, qat_req->out.dec.m) ||
-                    dma_mapping_error(dev, qat_req->phy_in) ||
-                    dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap;
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
 
        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
@@ -407,7 +472,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 
        if (!ret)
                return -EINPROGRESS;
-unmap:
+unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.dec.c);
@@ -415,9 +480,15 @@ unmap:
                if (!dma_mapping_error(dev, qat_req->in.dec.c))
                        dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
                                         DMA_TO_DEVICE);
-       if (!dma_mapping_error(dev, qat_req->out.dec.m))
-               dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
-                                DMA_FROM_DEVICE);
+unmap_dst:
+       if (qat_req->dst_align)
+               dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+                                 qat_req->out.dec.m);
+       else
+               if (!dma_mapping_error(dev, qat_req->out.dec.m))
+                       dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+                                        DMA_FROM_DEVICE);
+unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
@@ -531,7 +602,7 @@ err:
 }
 
 static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
-                         unsigned int keylen)
+                         unsigned int keylen, bool private)
 {
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
@@ -550,7 +621,13 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;
-       ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
+
+       if (private)
+               ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
+                                      keylen);
+       else
+               ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
+                                      keylen);
        if (ret < 0)
                goto free;
 
@@ -559,6 +636,11 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                ret = -EINVAL;
                goto free;
        }
+       if (private && !ctx->d) {
+               /* invalid private key provided */
+               ret = -EINVAL;
+               goto free;
+       }
 
        return 0;
 free:
@@ -579,6 +661,25 @@ free:
        return ret;
 }
 
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+                            unsigned int keylen)
+{
+       return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+                             unsigned int keylen)
+{
+       return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return (ctx->n) ? ctx->key_sz : -EINVAL;
+}
+
 static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
 {
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
@@ -617,7 +718,9 @@ static struct akcipher_alg rsa = {
        .decrypt = qat_rsa_dec,
        .sign = qat_rsa_dec,
        .verify = qat_rsa_enc,
-       .setkey = qat_rsa_setkey,
+       .set_pub_key = qat_rsa_setpubkey,
+       .set_priv_key = qat_rsa_setprivkey,
+       .max_size = qat_rsa_max_size,
        .init = qat_rsa_init_tfm,
        .exit = qat_rsa_exit_tfm,
        .reqsize = sizeof(struct qat_rsa_request) + 64,
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
deleted file mode 100644
index 97b0e02b600a..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsakey.asn1
+++ /dev/null
@@ -1,5 +0,0 @@
-RsaKey ::= SEQUENCE {
-       n INTEGER ({ qat_rsa_get_n }),
-       e INTEGER ({ qat_rsa_get_e }),
-       d INTEGER ({ qat_rsa_get_d })
-}
diff --git a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
new file mode 100644
index 000000000000..f0066adb79b8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
@@ -0,0 +1,11 @@
+RsaPrivKey ::= SEQUENCE {
+       version         INTEGER,
+       n               INTEGER ({ qat_rsa_get_n }),
+       e               INTEGER ({ qat_rsa_get_e }),
+       d               INTEGER ({ qat_rsa_get_d }),
+       prime1          INTEGER,
+       prime2          INTEGER,
+       exponent1       INTEGER,
+       exponent2       INTEGER,
+       coefficient     INTEGER
+}
diff --git a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
new file mode 100644
index 000000000000..bd667b31a21a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
@@ -0,0 +1,4 @@
+RsaPubKey ::= SEQUENCE {
+       n INTEGER ({ qat_rsa_get_n }),
+       e INTEGER ({ qat_rsa_get_e })
+}
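
A closing note on the two new .asn1 grammars: each "({ qat_rsa_get_n })"
annotation names an action callback that the kernel's ASN.1 compiler wires
into the generated decoder (the qat_rsapubkey-asn1.o/qat_rsaprivkey-asn1.o
objects added in the Makefile hunk above). The following is a hedged sketch
of the callback shape, following the kernel's asn1_action_t convention rather
than quoting this patch; the real qat_rsa_get_n() in qat_asym_algs.c also
validates the key size and copies n into a DMA-consistent buffer.

/* Sketch only: an ASN.1 action callback as the generated decoder expects it.
 * 'context' is whatever was passed to asn1_ber_decoder(); here, the driver's
 * qat_rsa_ctx.
 */
static int qat_rsa_get_n_sketch(void *context, size_t hdrlen,
                                unsigned char tag,
                                const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;

        /* 'value'/'vlen' cover the raw INTEGER contents of 'n'. */
        if (!value || !vlen)
                return -EINVAL;

        ctx->key_sz = vlen;     /* remember the modulus size for buffer sizing */
        return 0;
}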