author    Gilad Ben-Yossef <gilad@benyossef.com>  2018-01-22 04:27:01 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>  2018-02-15 10:26:40 -0500
commit    63ee04c8b491ee148489347e7da9fbfd982ca2bb (patch)
tree      cb983e6d07e105a71918f47804e966fd5f9f87a0 /drivers/crypto/ccree/cc_buffer_mgr.c
parent    4c3f97276e156820a0433bf7b59a4df1100829ae (diff)
crypto: ccree - add skcipher support
Add CryptoCell skcipher support.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/ccree/cc_buffer_mgr.c')
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 125
1 file changed, 125 insertions(+), 0 deletions(-)
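For context before the hunk itself: the two helpers this patch adds, cc_map_cipher_request() and cc_unmap_cipher_request(), are the DMA setup/teardown entry points the new skcipher glue calls around each hardware request. The sketch below shows one plausible call pattern; my_cipher_process() and my_send_request() are hypothetical names invented for illustration (the patch does not show the callers, which live in cc_cipher.c), and only the two cc_*_cipher_request() signatures are taken from the diff.

	/*
	 * Hypothetical caller, for illustration only. Everything except the
	 * cc_map_cipher_request()/cc_unmap_cipher_request() signatures is an
	 * assumption and not part of this patch.
	 */
	static int my_cipher_process(struct cc_drvdata *drvdata,
				     struct cipher_req_ctx *req_ctx,
				     unsigned int ivsize, unsigned int nbytes,
				     void *iv, struct scatterlist *src,
				     struct scatterlist *dst, gfp_t flags)
	{
		struct device *dev = drvdata_to_dev(drvdata);
		int rc;

		/*
		 * Map the IV and the src/dst scatterlists. This also picks
		 * DLLI vs. MLLI and, for MLLI, builds the link table.
		 */
		rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, nbytes,
					   iv, src, dst, flags);
		if (rc)
			return rc; /* map has already unwound its own state */

		rc = my_send_request(drvdata, req_ctx); /* assumed submit helper */
		if (rc)
			/* Submission failed: release the DMA mappings. */
			cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);

		return rc;
	}

Note the map/unmap pairing: cc_map_cipher_request() cleans up after itself on its internal error paths (via the cipher_exit label), so a caller only unmaps on failures that occur after a successful map.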
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 4c6757966fd2..46be101ede56 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -8,6 +8,7 @@
 
 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
+#include "cc_cipher.h"
 
 enum dma_buffer_type {
 	DMA_NULL_TYPE = -1,
@@ -347,6 +348,130 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 	return 0;
 }
 
+void cc_unmap_cipher_request(struct device *dev, void *ctx,
+			     unsigned int ivsize, struct scatterlist *src,
+			     struct scatterlist *dst)
+{
+	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+
+	if (req_ctx->gen_ctx.iv_dma_addr) {
+		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
+		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+				 ivsize,
+				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				 DMA_TO_DEVICE);
+	}
+	/* Release pool */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+	    req_ctx->mlli_params.mlli_virt_addr) {
+		dma_pool_free(req_ctx->mlli_params.curr_pool,
+			      req_ctx->mlli_params.mlli_virt_addr,
+			      req_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+	if (src != dst) {
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+	}
+}
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+			  unsigned int ivsize, unsigned int nbytes,
+			  void *info, struct scatterlist *src,
+			  struct scatterlist *dst, gfp_t flags)
+{
+	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+	struct mlli_params *mlli_params = &req_ctx->mlli_params;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct buffer_array sg_data;
+	u32 dummy = 0;
+	int rc = 0;
+	u32 mapped_nents = 0;
+
+	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+
+	/* Map IV buffer */
+	if (ivsize) {
+		dump_byte_array("iv", (u8 *)info, ivsize);
+		req_ctx->gen_ctx.iv_dma_addr =
+			dma_map_single(dev, (void *)info,
+				       ivsize,
+				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+				ivsize, info);
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+	} else {
+		req_ctx->gen_ctx.iv_dma_addr = 0;
+	}
+
+	/* Map the src SGL */
+	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+	if (rc) {
+		rc = -ENOMEM;
+		goto cipher_exit;
+	}
+	if (mapped_nents > 1)
+		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+	if (src == dst) {
+		/* Handle inplace operation */
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			req_ctx->out_nents = 0;
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+		}
+	} else {
+		/* Map the dst sg */
+		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
+			rc = -ENOMEM;
+			goto cipher_exit;
+		}
+		if (mapped_nents > 1)
+			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+					nbytes, 0, true,
+					&req_ctx->out_mlli_nents);
+		}
+	}
+
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
+			goto cipher_exit;
+	}
+
+	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+		cc_dma_buf_type(req_ctx->dma_buf_type));
+
+	return 0;
+
+cipher_exit:
+	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	return rc;
+}
+
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
 	struct buff_mgr_handle *buff_mgr_handle;