author		Salvatore Benedetto <salvatore.benedetto@intel.com>	2016-07-07 10:27:29 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-07-11 06:03:10 -0400
commit		c9839143ebbf5e821128da44f7e271d745aab19e (patch)
tree		48ce8c5f1a59f4530a2ae5d2b73bcd030212ef0e
parent		8ef7cafbccd84d509bbfba63188c8a3e0936e2ee (diff)
crypto: qat - Add DH support
Add DH support under kpp api. Drop struct qat_rsa_request and introduce
a more generic struct qat_asym_request and share it between RSA and DH
requests.

Signed-off-by: Salvatore Benedetto <salvatore.benedetto@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	drivers/crypto/qat/Kconfig                     |   1
-rw-r--r--	drivers/crypto/qat/qat_common/qat_asym_algs.c  | 593
2 files changed, 522 insertions, 72 deletions
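
For context, this is roughly how a kernel client would exercise the "dh" kpp algorithm that this patch lets the QAT driver provide (the "qat-dh" implementation binds to the generic "dh" name at priority 1000). This is a minimal sketch only: the helper name, parameter buffers and sizes are illustrative assumptions, and error handling is trimmed.

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: derive our public value g^xa mod p. */
static int example_dh_public_key(struct dh *params)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	char *packed, *out;
	unsigned int packed_len, out_len;
	int ret;

	tfm = crypto_alloc_kpp("dh", 0, 0);	/* may resolve to qat-dh */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Pack p, g and the private key xa into the kpp secret format. */
	packed_len = crypto_dh_key_len(params);
	packed = kmalloc(packed_len, GFP_KERNEL);
	crypto_dh_encode_key(packed, packed_len, params);
	ret = crypto_kpp_set_secret(tfm, packed, packed_len);

	/* No input scatterlist: with req->src == NULL the driver uses g
	 * (or the g == 2 shortcut) as the base, i.e. it computes the
	 * local public key. */
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	out_len = crypto_kpp_maxsize(tfm);	/* p_size for qat-dh */
	out = kzalloc(out_len, GFP_KERNEL);
	sg_init_one(&dst, out, out_len);
	kpp_request_set_output(req, &dst, out_len);

	ret = crypto_kpp_generate_public_key(req);
	/* qat-dh returns -EINPROGRESS; a real caller registers a
	 * completion callback with kpp_request_set_callback() and waits
	 * for the driver's callback to complete the request. */

	kpp_request_free(req);
	kfree(out);
	kfree(packed);
	crypto_free_kpp(tfm);
	return ret;
}
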
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 571d04dda415..ce3cae40f949 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_QAT
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AKCIPHER
+	select CRYPTO_DH
 	select CRYPTO_HMAC
 	select CRYPTO_RSA
 	select CRYPTO_SHA1
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index eaff02a3b1ac..3d56fb82f48a 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -49,6 +49,9 @@
 #include <crypto/internal/rsa.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
 #include <linux/dma-mapping.h>
 #include <linux/fips.h>
 #include <crypto/scatterwalk.h>
@@ -119,36 +122,454 @@ struct qat_rsa_ctx {
 	struct qat_crypto_instance *inst;
 } __packed __aligned(64);
 
-struct qat_rsa_request {
-	struct qat_rsa_input_params in;
-	struct qat_rsa_output_params out;
+struct qat_dh_input_params {
+	union {
+		struct {
+			dma_addr_t b;
+			dma_addr_t xa;
+			dma_addr_t p;
+		} in;
+		struct {
+			dma_addr_t xa;
+			dma_addr_t p;
+		} in_g2;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+	union {
+		dma_addr_t r;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+	char *g;
+	char *xa;
+	char *p;
+	dma_addr_t dma_g;
+	dma_addr_t dma_xa;
+	dma_addr_t dma_p;
+	unsigned int p_size;
+	bool g2;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_asym_request {
+	union {
+		struct qat_rsa_input_params rsa;
+		struct qat_dh_input_params dh;
+	} in;
+	union {
+		struct qat_rsa_output_params rsa;
+		struct qat_dh_output_params dh;
+	} out;
 	dma_addr_t phy_in;
 	dma_addr_t phy_out;
 	char *src_align;
 	char *dst_align;
 	struct icp_qat_fw_pke_request req;
-	struct qat_rsa_ctx *ctx;
+	union {
+		struct qat_rsa_ctx *rsa;
+		struct qat_dh_ctx *dh;
+	} ctx;
+	union {
+		struct akcipher_request *rsa;
+		struct kpp_request *dh;
+	} areq;
 	int err;
+	void (*cb)(struct icp_qat_fw_pke_resp *resp);
 } __aligned(64);
 
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+	struct kpp_request *areq = req->areq.dh;
+	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+				resp->pke_resp_hdr.comn_resp_flags);
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (areq->src) {
+		if (req->src_align)
+			dma_free_coherent(dev, req->ctx.dh->p_size,
+					  req->src_align, req->in.dh.in.b);
+		else
+			dma_unmap_single(dev, req->in.dh.in.b,
+					 req->ctx.dh->p_size, DMA_TO_DEVICE);
+	}
+
+	areq->dst_len = req->ctx.dh->p_size;
+	if (req->dst_align) {
+		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+					 areq->dst_len, 1);
+
+		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+				  req->out.dh.r);
+	} else {
+		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+				 DMA_FROM_DEVICE);
+	}
+
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_dh_output_params),
+			 DMA_TO_DEVICE);
+
+	kpp_request_complete(areq, err);
+}
+
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 1536:
+		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+	case 2048:
+		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+	case 3072:
+		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+	case 4096:
+		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+	default:
+		return 0;
+	};
+}
+
+static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
+{
+	return kpp_tfm_ctx(tfm);
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(kpp_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+	int n_input_params = 0;
+
+	if (unlikely(!ctx->xa))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->p_size) {
+		req->dst_len = ctx->p_size;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+						    !req->src && ctx->g2);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_dh_cb;
+	qat_req->ctx.dh = ctx;
+	qat_req->areq.dh = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	/*
+	 * If no source is provided use g as base
+	 */
+	if (req->src) {
+		qat_req->in.dh.in.xa = ctx->dma_xa;
+		qat_req->in.dh.in.p = ctx->dma_p;
+		n_input_params = 3;
+	} else {
+		if (ctx->g2) {
+			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+			qat_req->in.dh.in_g2.p = ctx->dma_p;
+			n_input_params = 2;
+		} else {
+			qat_req->in.dh.in.b = ctx->dma_g;
+			qat_req->in.dh.in.xa = ctx->dma_xa;
+			qat_req->in.dh.in.p = ctx->dma_p;
+			n_input_params = 3;
+		}
+	}
+
+	ret = -ENOMEM;
+	if (req->src) {
+		/*
+		 * src can be of any size in valid range, but HW expects it to
+		 * be the same as modulo p so in case it is different we need
+		 * to allocate a new buf and copy src data.
+		 * In other case we just need to map the user provided buffer.
+		 * Also need to make sure that it is in contiguous buffer.
+		 */
+		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+			qat_req->src_align = NULL;
+			qat_req->in.dh.in.b = dma_map_single(dev,
+							     sg_virt(req->src),
+							     req->src_len,
+							     DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(dev,
+						       qat_req->in.dh.in.b)))
+				return ret;
+
+		} else {
+			int shift = ctx->p_size - req->src_len;
+
+			qat_req->src_align = dma_zalloc_coherent(dev,
+						ctx->p_size,
+						&qat_req->in.dh.in.b,
+						GFP_KERNEL);
+			if (unlikely(!qat_req->src_align))
+				return ret;
+
+			scatterwalk_map_and_copy(qat_req->src_align + shift,
+						 req->src, 0, req->src_len, 0);
+		}
+	}
+	/*
+	 * dst can be of any size in valid range, but HW expects it to be the
+	 * same as modulo m so in case it is different we need to allocate a
+	 * new buf and copy src data.
+	 * In other case we just need to map the user provided buffer.
+	 * Also need to make sure that it is in contiguous buffer.
+	 */
+	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+		qat_req->dst_align = NULL;
+		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+						   req->dst_len,
+						   DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+							 &qat_req->out.dh.r,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+	}
+
+	qat_req->in.dh.in_tab[n_input_params] = 0;
+	qat_req->out.dh.out_tab[1] = 0;
+	/* Mapping in.in.b or in.in_g2.xa is the same */
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+					 sizeof(struct qat_dh_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+					  sizeof(struct qat_dh_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	msg->input_param_count = n_input_params;
+	msg->output_param_count = 1;
+
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_dh_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_dh_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+				  qat_req->out.dh.r);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.dh.r))
+			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+					 DMA_FROM_DEVICE);
+unmap_src:
+	if (req->src) {
+		if (qat_req->src_align)
+			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+					  qat_req->in.dh.in.b);
+		else
+			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+				dma_unmap_single(dev, qat_req->in.dh.in.b,
+						 ctx->p_size,
+						 DMA_TO_DEVICE);
+	}
+	return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+	switch (p_len) {
+	case 1536:
+	case 2048:
+	case 3072:
+	case 4096:
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+
+	if (unlikely(!params->p || !params->g))
+		return -EINVAL;
+
+	if (qat_dh_check_params_length(params->p_size << 3))
+		return -EINVAL;
+
+	ctx->p_size = params->p_size;
+	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+	if (!ctx->p)
+		return -ENOMEM;
+	memcpy(ctx->p, params->p, ctx->p_size);
+
+	/* If g equals 2 don't copy it */
+	if (params->g_size == 1 && *(char *)params->g == 0x02) {
+		ctx->g2 = true;
+		return 0;
+	}
+
+	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+	if (!ctx->g) {
+		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+		ctx->p = NULL;
+		return -ENOMEM;
+	}
+	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+	       params->g_size);
+
+	return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+	if (ctx->g) {
+		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+		ctx->g = NULL;
+	}
+	if (ctx->xa) {
+		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+		ctx->xa = NULL;
+	}
+	if (ctx->p) {
+		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+		ctx->p = NULL;
+	}
+	ctx->p_size = 0;
+	ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
+			     unsigned int len)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	struct dh params;
+	int ret;
+
+	if (crypto_dh_decode_key(buf, len, &params) < 0)
+		return -EINVAL;
+
+	/* Free old secret if any */
+	qat_dh_clear_ctx(dev, ctx);
+
+	ret = qat_dh_set_params(ctx, &params);
+	if (ret < 0)
+		return ret;
+
+	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+				      GFP_KERNEL);
+	if (!ctx->xa) {
+		qat_dh_clear_ctx(dev, ctx);
+		return -ENOMEM;
+	}
+	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+	       params.key_size);
+
+	return 0;
+}
+
+static int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	return ctx->p ? ctx->p_size : -EINVAL;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->p_size = 0;
+	ctx->g2 = false;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	qat_dh_clear_ctx(dev, ctx);
+	qat_crypto_put_instance(ctx->inst);
+}
+
 static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 {
-	struct akcipher_request *areq = (void *)(__force long)resp->opaque;
-	struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
-	struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+	struct akcipher_request *areq = req->areq.rsa;
+	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
 	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
 				resp->pke_resp_hdr.comn_resp_flags);
 
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
 	if (req->src_align)
-		dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
-				  req->in.enc.m);
+		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+				  req->in.rsa.enc.m);
 	else
-		dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
 				 DMA_TO_DEVICE);
 
-	areq->dst_len = req->ctx->key_sz;
+	areq->dst_len = req->ctx.rsa->key_sz;
 	if (req->dst_align) {
 		char *ptr = req->dst_align;
 
@@ -157,14 +578,14 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 			ptr++;
 		}
 
-		if (areq->dst_len != req->ctx->key_sz)
+		if (areq->dst_len != req->ctx.rsa->key_sz)
 			memmove(req->dst_align, ptr, areq->dst_len);
 
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
 
-		dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
-				  req->out.enc.c);
+		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+				  req->out.rsa.enc.c);
 	} else {
 		char *ptr = sg_virt(areq->dst);
 
@@ -176,7 +597,7 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 	if (sg_virt(areq->dst) != ptr && areq->dst_len)
 		memmove(sg_virt(areq->dst), ptr, areq->dst_len);
 
-		dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
 				 DMA_FROM_DEVICE);
 	}
 
@@ -192,8 +613,9 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 void qat_alg_asym_callback(void *_resp)
 {
 	struct icp_qat_fw_pke_resp *resp = _resp;
+	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
 
-	qat_rsa_cb(resp);
+	areq->cb(resp);
 }
 
 #define PKE_RSA_EP_512 0x1c161b21
@@ -289,7 +711,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct device *dev = &GET_DEV(inst->accel_dev);
-	struct qat_rsa_request *qat_req =
+	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
 	int ret, ctr = 0;
@@ -308,14 +730,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
 		return -EINVAL;
 
-	qat_req->ctx = ctx;
+	qat_req->cb = qat_rsa_cb;
+	qat_req->ctx.rsa = ctx;
+	qat_req->areq.rsa = req;
 	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
 	msg->pke_hdr.comn_req_flags =
 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
 					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
 
-	qat_req->in.enc.e = ctx->dma_e;
-	qat_req->in.enc.n = ctx->dma_n;
+	qat_req->in.rsa.enc.e = ctx->dma_e;
+	qat_req->in.rsa.enc.n = ctx->dma_n;
 	ret = -ENOMEM;
 
 	/*
@@ -327,16 +751,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
+		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
 						   req->src_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
+		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
 			return ret;
 
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
 		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->in.enc.m,
+							 &qat_req->in.rsa.enc.m,
 							 GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
@@ -346,30 +770,30 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	}
 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
 		qat_req->dst_align = NULL;
-		qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
+		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
 						    req->dst_len,
 						    DMA_FROM_DEVICE);
 
-		if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
+		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
 			goto unmap_src;
 
 	} else {
 		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->out.enc.c,
+							 &qat_req->out.rsa.enc.c,
 							 GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 
 	}
-	qat_req->in.in_tab[3] = 0;
-	qat_req->out.out_tab[1] = 0;
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+	qat_req->in.rsa.in_tab[3] = 0;
+	qat_req->out.rsa.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
 					 sizeof(struct qat_rsa_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
 					  sizeof(struct qat_rsa_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -377,7 +801,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 
 	msg->pke_mid.src_data_addr = qat_req->phy_in;
 	msg->pke_mid.dest_data_addr = qat_req->phy_out;
-	msg->pke_mid.opaque = (uint64_t)(__force long)req;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
 	msg->input_param_count = 3;
 	msg->output_param_count = 1;
 	do {
@@ -399,19 +823,19 @@ unmap_in_params:
 unmap_dst:
 	if (qat_req->dst_align)
 		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.enc.c);
+				  qat_req->out.rsa.enc.c);
 	else
-		if (!dma_mapping_error(dev, qat_req->out.enc.c))
-			dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
-					 DMA_FROM_DEVICE);
+		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+					 ctx->key_sz, DMA_FROM_DEVICE);
 unmap_src:
 	if (qat_req->src_align)
 		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.enc.m);
+				  qat_req->in.rsa.enc.m);
 	else
-		if (!dma_mapping_error(dev, qat_req->in.enc.m))
-			dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
-					 DMA_TO_DEVICE);
+		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+					 ctx->key_sz, DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -421,7 +845,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct device *dev = &GET_DEV(inst->accel_dev);
-	struct qat_rsa_request *qat_req =
+	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
 	int ret, ctr = 0;
@@ -442,21 +866,23 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
 		return -EINVAL;
 
-	qat_req->ctx = ctx;
+	qat_req->cb = qat_rsa_cb;
+	qat_req->ctx.rsa = ctx;
+	qat_req->areq.rsa = req;
 	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
 	msg->pke_hdr.comn_req_flags =
 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
 					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
 
 	if (ctx->crt_mode) {
-		qat_req->in.dec_crt.p = ctx->dma_p;
-		qat_req->in.dec_crt.q = ctx->dma_q;
-		qat_req->in.dec_crt.dp = ctx->dma_dp;
-		qat_req->in.dec_crt.dq = ctx->dma_dq;
-		qat_req->in.dec_crt.qinv = ctx->dma_qinv;
+		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
 	} else {
-		qat_req->in.dec.d = ctx->dma_d;
-		qat_req->in.dec.n = ctx->dma_n;
+		qat_req->in.rsa.dec.d = ctx->dma_d;
+		qat_req->in.rsa.dec.n = ctx->dma_n;
 	}
 	ret = -ENOMEM;
 
@@ -469,16 +895,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
+		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
 						   req->dst_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
+		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
 			return ret;
 
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
 		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->in.dec.c,
+							 &qat_req->in.rsa.dec.c,
 							 GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
@@ -488,16 +914,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	}
 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
 		qat_req->dst_align = NULL;
-		qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
+		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
 						    req->dst_len,
 						    DMA_FROM_DEVICE);
 
-		if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
+		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
 			goto unmap_src;
 
 	} else {
 		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->out.dec.m,
+							 &qat_req->out.rsa.dec.m,
 							 GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
@@ -505,17 +931,17 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	}
 
 	if (ctx->crt_mode)
-		qat_req->in.in_tab[6] = 0;
+		qat_req->in.rsa.in_tab[6] = 0;
 	else
-		qat_req->in.in_tab[3] = 0;
-	qat_req->out.out_tab[1] = 0;
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+		qat_req->in.rsa.in_tab[3] = 0;
+	qat_req->out.rsa.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
 					 sizeof(struct qat_rsa_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
 					  sizeof(struct qat_rsa_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -523,7 +949,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 
 	msg->pke_mid.src_data_addr = qat_req->phy_in;
 	msg->pke_mid.dest_data_addr = qat_req->phy_out;
-	msg->pke_mid.opaque = (uint64_t)(__force long)req;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
 	if (ctx->crt_mode)
 		msg->input_param_count = 6;
 	else
@@ -549,19 +975,19 @@ unmap_in_params:
 unmap_dst:
 	if (qat_req->dst_align)
 		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.dec.m);
+				  qat_req->out.rsa.dec.m);
 	else
-		if (!dma_mapping_error(dev, qat_req->out.dec.m))
-			dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
-					 DMA_FROM_DEVICE);
+		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+					 ctx->key_sz, DMA_FROM_DEVICE);
 unmap_src:
 	if (qat_req->src_align)
 		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.dec.c);
+				  qat_req->in.rsa.dec.c);
 	else
-		if (!dma_mapping_error(dev, qat_req->in.dec.c))
-			dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
-					 DMA_TO_DEVICE);
+		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+					 ctx->key_sz, DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -900,7 +1326,7 @@ static struct akcipher_alg rsa = {
 	.max_size = qat_rsa_max_size,
 	.init = qat_rsa_init_tfm,
 	.exit = qat_rsa_exit_tfm,
-	.reqsize = sizeof(struct qat_rsa_request) + 64,
+	.reqsize = sizeof(struct qat_asym_request) + 64,
 	.base = {
 		.cra_name = "rsa",
 		.cra_driver_name = "qat-rsa",
@@ -910,6 +1336,23 @@ static struct akcipher_alg rsa = {
 	},
 };
 
+static struct kpp_alg dh = {
+	.set_secret = qat_dh_set_secret,
+	.generate_public_key = qat_dh_compute_value,
+	.compute_shared_secret = qat_dh_compute_value,
+	.max_size = qat_dh_max_size,
+	.init = qat_dh_init_tfm,
+	.exit = qat_dh_exit_tfm,
+	.reqsize = sizeof(struct qat_asym_request) + 64,
+	.base = {
+		.cra_name = "dh",
+		.cra_driver_name = "qat-dh",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_dh_ctx),
+	},
+};
+
 int qat_asym_algs_register(void)
 {
 	int ret = 0;
@@ -918,7 +1361,11 @@ int qat_asym_algs_register(void)
 	if (++active_devs == 1) {
 		rsa.base.cra_flags = 0;
 		ret = crypto_register_akcipher(&rsa);
+		if (ret)
+			goto unlock;
+		ret = crypto_register_kpp(&dh);
 	}
+unlock:
 	mutex_unlock(&algs_lock);
 	return ret;
 }
@@ -926,7 +1373,9 @@ int qat_asym_algs_register(void)
 void qat_asym_algs_unregister(void)
 {
 	mutex_lock(&algs_lock);
-	if (--active_devs == 0)
+	if (--active_devs == 0) {
 		crypto_unregister_akcipher(&rsa);
+		crypto_unregister_kpp(&dh);
+	}
 	mutex_unlock(&algs_lock);
 }
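
Computing the shared secret with a peer's public value exercises the req->src path of qat_dh_compute_value() above (three inputs: b, xa, p). Continuing the sketch from the top of the patch, with tfm, out and out_len set up as before and peer_key/peer_len assumed to hold the remote public value; again illustrative only, with error handling omitted.

	struct scatterlist src, dst;
	struct kpp_request *req;
	int ret;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&src, peer_key, peer_len);	/* big-endian, <= p_size bytes */
	sg_init_one(&dst, out, out_len);
	kpp_request_set_input(req, &src, peer_len);
	kpp_request_set_output(req, &dst, out_len);
	ret = crypto_kpp_compute_shared_secret(req);
	/* -EINPROGRESS again means the QAT firmware finishes the request
	 * asynchronously; qat_dh_cb() copies the result into 'out' and
	 * completes the request. */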