author		Antoine Ténart <antoine.tenart@free-electrons.com>	2017-12-11 06:10:57 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>		2017-12-22 03:48:00 -0500
commit		7cad2fabd5691dbb17762877d4e7f236fe4bc181 (patch)
tree		8e95ed4374b2603865df65c1e5895fc50ee0c97d
parent		0a02dcca126280595950f3ea809f77c9cb0a235c (diff)
crypto: inside-secure - fix request allocations in invalidation path
This patch makes use of the SKCIPHER_REQUEST_ON_STACK and
AHASH_REQUEST_ON_STACK helpers to allocate enough memory to contain both
the crypto request structures and their embedded context (__ctx).

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
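For background, the *_REQUEST_ON_STACK helpers reserve room for the request
structure plus the transform's declared reqsize, so the driver context
returned by skcipher_request_ctx() / ahash_request_ctx() stays inside the
allocation; a bare struct skcipher_request on the stack leaves no room for
__ctx at all. A paraphrased sketch of the two macros, roughly as they appear
in include/crypto/skcipher.h and include/crypto/hash.h of this era (not a
verbatim copy):

	#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
		char __##name##_desc[sizeof(struct skcipher_request) + \
			crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
		struct skcipher_request *name = (void *)__##name##_desc

	#define AHASH_REQUEST_ON_STACK(name, ahash) \
		char __##name##_desc[sizeof(struct ahash_request) + \
			crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
		struct ahash_request *name = (void *)__##name##_desc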
 drivers/crypto/inside-secure/safexcel_cipher.c | 16 ++++++++--------
 drivers/crypto/inside-secure/safexcel_hash.c   | 14 +++++++-------
 2 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 9ea24868d860..fcc0a606d748 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -422,25 +422,25 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
-	struct safexcel_cipher_req *sreq = skcipher_request_ctx(&req);
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					safexcel_inv_complete, &result);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      safexcel_inv_complete, &result);
 
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
 	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
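Why the old code was broken: skcipher_request_ctx() is a trivial accessor
that returns the memory immediately following the request structure, i.e.
the driver-private __ctx area. With a plain on-stack struct skcipher_request
that memory was never allocated, so writing sreq->needs_inv scribbled past
the end of the local variable. A sketch of the accessor, as I recall it from
include/crypto/skcipher.h (not quoted verbatim):

	static inline void *skcipher_request_ctx(struct skcipher_request *req)
	{
		/* __ctx is the flexible tail of the request; it is only
		 * valid if reqsize extra bytes were actually allocated. */
		return req->__ctx;
	}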
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 79fe149804d3..55ff8a340b11 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -450,25 +450,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
-	struct safexcel_ahash_req *rctx = ahash_request_ctx(&req);
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   safexcel_inv_complete, &result);
 
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
 	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
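The hash path had the identical defect, with ahash_request_ctx() likewise
returning the trailing __ctx area. For AHASH_REQUEST_ON_STACK to reserve the
right amount of stack, the driver must have declared its per-request context
size beforehand via crypto_ahash_set_reqsize(). A hypothetical init sketch
(illustrative only, not the safexcel driver's exact code):

	static int example_ahash_cra_init(struct crypto_tfm *tfm)
	{
		/* Declares the per-request context size; this is the value
		 * AHASH_REQUEST_ON_STACK adds to its stack buffer. */
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct safexcel_ahash_req));
		return 0;
	}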