author		Linus Torvalds <torvalds@linux-foundation.org>	2018-01-05 15:10:06 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-05 15:10:06 -0500
commit		64648a5fcabf46000a496c41c92c5c0f16be78ed (patch)
tree		12e706ccc1a2e29c0fc6400589ac2a8c1c414ac3
parent		d8887f1c7289848e74c92bd4322789a9cd7de699 (diff)
parent		2973633e9f09311e849f975d969737af81a521ff (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - racy use of ctx->rcvused in af_alg

   - algif_aead crash in chacha20poly1305

   - freeing bogus pointer in pcrypt

   - build error on MIPS in mpi

   - memory leak in inside-secure

   - memory overwrite in inside-secure

   - NULL pointer dereference in inside-secure

   - state corruption in inside-secure

   - build error without CRYPTO_GF128MUL in chelsio

   - use after free in n2"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: inside-secure - do not use areq->result for partial results
  crypto: inside-secure - fix request allocations in invalidation path
  crypto: inside-secure - free requests even if their handling failed
  crypto: inside-secure - per request invalidation
  lib/mpi: Fix umul_ppmm() for MIPS64r6
  crypto: pcrypt - fix freeing pcrypt instances
  crypto: n2 - cure use after free
  crypto: af_alg - Fix race around ctx->rcvused by making it atomic_t
  crypto: chacha20poly1305 - validate the digest size
  crypto: chelsio - select CRYPTO_GF128MUL
-rw-r--r--	crypto/af_alg.c	4
-rw-r--r--	crypto/algif_aead.c	2
-rw-r--r--	crypto/algif_skcipher.c	2
-rw-r--r--	crypto/chacha20poly1305.c	6
-rw-r--r--	crypto/pcrypt.c	19
-rw-r--r--	drivers/crypto/chelsio/Kconfig	1
-rw-r--r--	drivers/crypto/inside-secure/safexcel.c	1
-rw-r--r--	drivers/crypto/inside-secure/safexcel_cipher.c	85
-rw-r--r--	drivers/crypto/inside-secure/safexcel_hash.c	89
-rw-r--r--	drivers/crypto/n2_core.c	3
-rw-r--r--	include/crypto/if_alg.h	5
-rw-r--r--	lib/mpi/longlong.h	18
12 files changed, 173 insertions, 62 deletions
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 444a387df219..35d4dcea381f 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 	unsigned int i;
 
 	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
-		ctx->rcvused -= rsgl->sg_num_bytes;
+		atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
 		af_alg_free_sg(&rsgl->sgl);
 		list_del(&rsgl->list);
 		if (rsgl != &areq->first_rsgl)
@@ -1163,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
 		areq->last_rsgl = rsgl;
 		len += err;
-		ctx->rcvused += err;
+		atomic_add(err, &ctx->rcvused);
 		rsgl->sg_num_bytes = err;
 		iov_iter_advance(&msg->msg_iter, err);
 	}
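
The race fixed above is the classic unlocked read-modify-write: two paths
could execute "ctx->rcvused += err" concurrently, both load the same old
value, and lose one update. A standalone C11 illustration of the pattern
(hypothetical test program, not kernel code; atomic_add/atomic_sub in the
patch play the role of atomic_fetch_add here):

/* race.c - build with: cc -O2 -pthread race.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static size_t racy;		/* like the old "size_t rcvused" */
static atomic_size_t safe;	/* like the new "atomic_t rcvused" */

static void *worker(void *arg)
{
	for (int i = 0; i < 1000000; i++) {
		racy += 1;			/* load/add/store: updates get lost */
		atomic_fetch_add(&safe, 1);	/* single atomic RMW: never loses */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("racy=%zu safe=%zu (expected 2000000)\n", racy, atomic_load(&safe));
	return 0;
}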
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index ddcc45f77edd..e9885a35ef6e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -571,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
-	ctx->rcvused = 0;
+	atomic_set(&ctx->rcvused, 0);
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index baef9bfccdda..c5c47b680152 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -390,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
-	ctx->rcvused = 0;
+	atomic_set(&ctx->rcvused, 0);
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index db1bc3147bc4..600afa99941f 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 							    algt->mask));
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
+	poly_hash = __crypto_hash_alg_common(poly);
+
+	err = -EINVAL;
+	if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+		goto out_put_poly;
 
 	err = -ENOMEM;
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
 	ctx = aead_instance_ctx(inst);
 	ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-	poly_hash = __crypto_hash_alg_common(poly);
 	err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
 				      aead_crypto_instance(inst));
 	if (err)
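
Background on the digest-size check above: the template copies the computed
authentication tag into a 16-byte (POLY1305_DIGEST_SIZE) buffer, so
instantiating it with a hash whose digest is larger would overwrite adjacent
memory. Performing the check before kzalloc() also keeps the error path down
to the single out_put_poly label. A minimal sketch of the rule, reusing the
names from the hunk (illustrative):

	poly_hash = __crypto_hash_alg_common(poly);
	if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)	/* 16 bytes */
		return -EINVAL;	/* e.g. rfc7539(chacha20,sha256) is rejected */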
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index ee9cfb99fe25..f8ec3d4ba4a8 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
 	crypto_free_aead(ctx->child);
 }
 
+static void pcrypt_free(struct aead_instance *inst)
+{
+	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->spawn);
+	kfree(inst);
+}
+
 static int pcrypt_init_instance(struct crypto_instance *inst,
 				struct crypto_alg *alg)
 {
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->alg.encrypt = pcrypt_aead_encrypt;
 	inst->alg.decrypt = pcrypt_aead_decrypt;
 
+	inst->free = pcrypt_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
 	return -EINVAL;
 }
 
-static void pcrypt_free(struct crypto_instance *inst)
-{
-	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_aead(&ctx->spawn);
-	kfree(inst);
-}
-
 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
 					unsigned long val, void *data)
 {
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 static struct crypto_template pcrypt_tmpl = {
 	.name = "pcrypt",
 	.create = pcrypt_create,
-	.free = pcrypt_free,
 	.module = THIS_MODULE,
 };
 
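
Why the old callback freed a bogus pointer: pcrypt allocates a struct
aead_instance, but the template-level .free received the embedded struct
crypto_instance, which lives at a non-zero offset inside the aead_instance,
so kfree() was handed a pointer that kzalloc() never returned. Registering
the callback on the instance itself keeps allocation and release in the same
type. A sketch of the corrected ownership pattern, mirroring the new
pcrypt_free above (illustrative):

	static void pcrypt_free(struct aead_instance *inst)
	{
		struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

		crypto_drop_aead(&ctx->spawn);	/* release the spawned child */
		kfree(inst);			/* same pointer kzalloc() returned */
	}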
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..b56b3f711d94 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
 	select CRYPTO_AUTHENC
+	select CRYPTO_GF128MUL
 	---help---
 	  The Chelsio Crypto Co-processor driver for T6 adapters.
 
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..4bcef78a08aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 		ndesc = ctx->handle_result(priv, ring, sreq->req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
+			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
 			return;
 		}
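
Note on the leak fixed above: by the time handle_result() runs, sreq has
already been unlinked from the ring's request list, so the error return was
the only exit path that never freed it, and every failed result leaked one
allocation. The rule, restated with the names from the hunk (illustrative):

	if (ndesc < 0) {
		kfree(sreq);	/* nothing else references the request now */
		dev_err(priv->dev, "failed to handle result (%d)", ndesc);
		return;
	}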
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..fcc0a606d748 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
 
 #include <crypto/aes.h>
 #include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
 	unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+	bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
 				  struct crypto_async_request *async,
 				  struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
 	return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-				  struct crypto_async_request *async,
-				  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
 	struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	request->req = &req->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_aes_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 				    int ring, struct safexcel_request *request,
 				    int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
-
 	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+			 int ring, struct safexcel_request *request,
+			 int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int ret;
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, request,
+					       commands, results);
+	else
+		ret = safexcel_aes_send(async, ring, request,
+					commands, results);
+	return ret;
+}
+
 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      safexcel_inv_complete, &result);
 
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_cipher_send_inv;
+	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
 			enum safexcel_cipher_direction dir, u32 mode)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
+	sreq->needs_inv = false;
 	ctx->direction = dir;
 	ctx->mode = mode;
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_cipher_send_inv;
+		if (ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
-		ctx->base.send = safexcel_aes_send;
-
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
 						 EIP197_GFP_FLAGS(req->base),
 						 &ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
 					     alg.skcipher.base);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
 
 	return 0;
 }
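
Context for the per-request invalidation rework above: send() and
handle_result() used to be function pointers stored in the shared per-tfm
context and rewritten on every request, so two requests in flight on the
same tfm could switch each other between the data path and the invalidation
path, corrupting state. After this patch the hooks are installed once at
cra_init() time and each request carries its own flag. A sketch of the
resulting flow, using the names introduced in the hunks above:

	/* cra_init: fixed for the lifetime of the tfm */
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	/* safexcel_aes: claim a pending invalidation for this request only */
	if (ctx->base.needs_inv) {
		sreq->needs_inv = true;
		ctx->base.needs_inv = false;
	}

	/* safexcel_send: dispatch on per-request state, race-free */
	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);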
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..0c5a5820b06e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
 	bool last_req;
 	bool finish;
 	bool hmac;
+	bool needs_inv;
 
 	u8 state_sz;    /* expected sate size, only set once */
-	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
 	u64 len;
 	u64 processed;
@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	}
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-				  struct crypto_async_request *async,
-				  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
 {
 	struct safexcel_result_desc *rdesc;
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-	int cache_len, result_sz = sreq->state_sz;
+	int cache_len;
 
 	*ret = 0;
 
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (sreq->finish)
-		result_sz = crypto_ahash_digestsize(ahash);
-	memcpy(sreq->state, areq->result, result_sz);
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
 
 	dma_unmap_sg(priv->dev, areq->src,
 		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	return 1;
 }
 
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
-			       struct safexcel_request *request, int *commands,
-			       int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   struct safexcel_request *request,
+				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ send_command:
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
 					      req->state_sz, DMA_FROM_DEVICE);
 	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
 		ret = -EINVAL;
@@ -292,7 +293,6 @@ send_command:
 
 	req->processed += len;
 	request->req = &areq->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_ahash_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	return 1;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 				   int ring, struct safexcel_request *request,
 				   int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
 	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, struct safexcel_request *request,
+			       int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, request,
+					      commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, request,
+					      commands, results);
+	return ret;
+}
+
 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   safexcel_inv_complete, &result);
 
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_ahash_send_inv;
+	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
-	ctx->base.send = safexcel_ahash_send;
+	req->needs_inv = false;
 
 	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
 		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_ahash_send_inv;
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
 					     struct safexcel_alg_template, alg.ahash);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct safexcel_ahash_req));
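
The hash side also fixes where partial results land: the engine DMAs
state_sz bytes of internal state after every operation, but areq->result is
only required to be valid (and only digestsize bytes long) for the final
request, so intermediate updates used to overwrite caller memory. The rule
after this patch, sketched with the names from the hunks above:

	/* always DMA the engine state into the request-private buffer */
	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
					      req->state_sz, DMA_FROM_DEVICE);

	/* only a finished request exposes the digest to the caller */
	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));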
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52cf2ecc..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
 					  CWQ_ENTRY_SIZE, 0, NULL);
 	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
 		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
 		return -ENOMEM;
 	}
 	return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
 {
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
 }
 
 static long spu_queue_register_workfn(void *arg)
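
The use-after-free cured above: queue_cache_init() only creates a cache for
a slot that is still NULL, so a stale pointer left behind by a failed init
or by queue_cache_destroy() meant the next initialization skipped creation
and handed out memory from a destroyed cache. Clearing the slots makes the
init/destroy cycle idempotent (kmem_cache_destroy(NULL) is a safe no-op).
A sketch of the retry that used to go wrong, with size and align standing in
for the driver's MAU constants (illustrative):

	/* second init attempt after a partial failure */
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])	/* stale non-NULL: skipped! */
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue", size, align, 0, NULL);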
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 38d9c5861ed8..f38227a78eae 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -18,6 +18,7 @@
 #include <linux/if_alg.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 
 #include <crypto/aead.h>
@@ -150,7 +151,7 @@ struct af_alg_ctx {
 	struct crypto_wait wait;
 
 	size_t used;
-	size_t rcvused;
+	atomic_t rcvused;
 
 	bool more;
 	bool merge;
@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
 	struct af_alg_ctx *ctx = ask->private;
 
 	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
-		     ctx->rcvused, 0);
+		     atomic_read(&ctx->rcvused), 0);
 }
 
 /**
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 57fd45ab7af1..08c60d10747f 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
  **************  MIPS/64  **************
  ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v)						\
+do {									\
+	__asm__ ("dmulu %0,%1,%2"					\
+		 : "=d" ((UDItype)(w0))					\
+		 : "d" ((UDItype)(u)),					\
+		   "d" ((UDItype)(v)));					\
+	__asm__ ("dmuhu %0,%1,%2"					\
+		 : "=d" ((UDItype)(w1))					\
+		 : "d" ((UDItype)(u)),					\
+		   "d" ((UDItype)(v)));					\
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v)						\
 do {									\
 	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
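
For reference, umul_ppmm(w1, w0, u, v) computes the full 128-bit product of
two 64-bit operands, with w1 receiving the high half (what dmuhu returns on
MIPS64r6) and w0 the low half (dmulu). A standalone, runnable reference in
plain C using the compiler's 128-bit type (hypothetical test program, not
part of the patch):

/* umul_ref.c - build with: cc -O2 umul_ref.c */
#include <stdint.h>
#include <stdio.h>

static void umul_ppmm_ref(uint64_t *w1, uint64_t *w0, uint64_t u, uint64_t v)
{
	unsigned __int128 p = (unsigned __int128)u * v;

	*w1 = (uint64_t)(p >> 64);	/* high half: dmuhu */
	*w0 = (uint64_t)p;		/* low half:  dmulu */
}

int main(void)
{
	uint64_t hi, lo;

	umul_ppmm_ref(&hi, &lo, 0xffffffffffffffffULL, 2);
	/* (2^64 - 1) * 2 = 2^65 - 2 -> hi = 1, lo = 0xfffffffffffffffe */
	printf("hi=%016llx lo=%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}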