author	Gilad Ben-Yossef <gilad@benyossef.com>	2019-04-18 09:38:46 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2019-04-25 03:38:14 -0400
commit	a108f9311c01271bccad45d321cf9ddfac852c4b (patch)
tree	61f1684502035612cc7e8e52bf3acb5bdcf2c933
parent	dcf6285d18ea147b3366de14121825be82a243f2 (diff)
crypto: ccree - fix backlog notifications
We were doing backlog notification callbacks via a cipher/hash/aead
request structure cast to the base structure, which may or may not work
based on how the structure is laid out in memory and is not safe.

Fix it by delegating the backlog notification to the appropriate
internal callbacks, which are type aware.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
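For context, the hazard is the "cast to embedded base member" pattern: casting a request pointer to struct crypto_async_request only dereferences the right memory when the base member sits at offset zero, which is not guaranteed for every request type. The following stand-alone sketch (simplified stand-in structs, not the real kernel definitions from include/crypto/) shows why the offset matters and why a type-aware callback avoids the problem:

/*
 * Minimal sketch, not driver code: the struct layouts below are
 * simplified stand-ins for aead_request/skcipher_request.
 */
#include <stddef.h>
#include <stdio.h>

struct crypto_async_request_sketch {
	void (*complete)(void *req, int err);
	void *data;
};

/* 'base' first: casting the request pointer to the base type happens to work. */
struct aead_request_sketch {
	struct crypto_async_request_sketch base;
	unsigned int cryptlen;
};

/* 'base' NOT first: the same cast now points at unrelated fields. */
struct skcipher_request_sketch {
	unsigned int cryptlen;
	unsigned char *iv;
	struct crypto_async_request_sketch base;
};

int main(void)
{
	printf("aead base offset:     %zu\n",
	       offsetof(struct aead_request_sketch, base));
	printf("skcipher base offset: %zu\n",
	       offsetof(struct skcipher_request_sketch, base));
	/*
	 * The old code did the equivalent of:
	 *   struct crypto_async_request_sketch *r = (void *)skcipher_req;
	 *   r->complete(r, -EINPROGRESS);
	 * which is only correct when the base offset is 0.  The fix instead
	 * invokes a per-type completion callback that receives the original
	 * (typed) request pointer and knows its real layout.
	 */
	return 0;
}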
-rw-r--r--	drivers/crypto/ccree/cc_aead.c	4
-rw-r--r--	drivers/crypto/ccree/cc_cipher.c	10
-rw-r--r--	drivers/crypto/ccree/cc_hash.c	28
-rw-r--r--	drivers/crypto/ccree/cc_request_mgr.c	11
4 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index c5cde327cf1f..1fa3c7fef851 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -220,6 +220,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
+	/* BACKLOG notification */
+	if (err == -EINPROGRESS)
+		goto done;
+
 	cc_unmap_aead_request(dev, areq);
 
 	/* Restore ordinary iv pointer */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 15da3a35a6a1..1ba7c8a7bd52 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -818,9 +818,13 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
 	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
 
-	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-	memcpy(req->iv, req_ctx->iv, ivsize);
-	kzfree(req_ctx->iv);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+		memcpy(req->iv, req_ctx->iv, ivsize);
+		kzfree(req_ctx->iv);
+	}
+
 	skcipher_request_complete(req, err);
 }
 
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..e824ab60b59c 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -280,8 +280,12 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
 
 	dev_dbg(dev, "req=%pK\n", req);
 
-	cc_unmap_hash_request(dev, state, req->src, false);
-	cc_unmap_req(dev, state, ctx);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_hash_request(dev, state, req->src, false);
+		cc_unmap_req(dev, state, ctx);
+	}
+
 	req->base.complete(&req->base, err);
 }
 
@@ -295,9 +299,13 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
 
 	dev_dbg(dev, "req=%pK\n", req);
 
-	cc_unmap_hash_request(dev, state, req->src, false);
-	cc_unmap_result(dev, state, digestsize, req->result);
-	cc_unmap_req(dev, state, ctx);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_hash_request(dev, state, req->src, false);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+
 	req->base.complete(&req->base, err);
 }
 
@@ -311,9 +319,13 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
 
 	dev_dbg(dev, "req=%pK\n", req);
 
-	cc_unmap_hash_request(dev, state, req->src, false);
-	cc_unmap_result(dev, state, digestsize, req->result);
-	cc_unmap_req(dev, state, ctx);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_hash_request(dev, state, req->src, false);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+
 	req->base.complete(&req->base, err);
 }
 
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 88c97a580dd8..c2e8190bb067 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -364,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
 				 struct cc_bl_item *bli)
 {
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
 
 	spin_lock_bh(&mgr->bl_lock);
 	list_add_tail(&bli->list, &mgr->backlog);
 	++mgr->bl_len;
+	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
 	spin_unlock_bh(&mgr->bl_lock);
 	tasklet_schedule(&mgr->comptask);
 }
@@ -377,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 	struct cc_bl_item *bli;
 	struct cc_crypto_req *creq;
-	struct crypto_async_request *req;
+	void *req;
 	bool ivgen;
 	unsigned int total_len;
 	struct device *dev = drvdata_to_dev(drvdata);
@@ -387,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
 
 	while (mgr->bl_len) {
 		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
 		spin_unlock(&mgr->bl_lock);
 
+
 		creq = &bli->creq;
-		req = (struct crypto_async_request *)creq->user_arg;
+		req = creq->user_arg;
 
 		/*
 		 * Notify the request we're moving out of the backlog
 		 * but only if we haven't done so already.
 		 */
 		if (!bli->notif) {
-			req->complete(req, -EINPROGRESS);
+			creq->user_cb(dev, req, -EINPROGRESS);
 			bli->notif = true;
 		}
 
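To illustrate the dispatch pattern the patch moves to, here is a stand-alone sketch (simplified fake_* types, not the driver's actual structures): the request manager keeps only an opaque pointer plus a type-aware callback, and that callback treats -EINPROGRESS as a pure backlog notification, mirroring the user_cb/user_arg pairing visible in the diff above.

/*
 * Sketch only: fake_crypto_req stands in for cc_crypto_req's
 * user_cb/user_arg pair; fake_cipher_complete stands in for the
 * type-aware completion handlers in cc_cipher.c/cc_hash.c/cc_aead.c.
 */
#include <stdio.h>

#define EINPROGRESS 115

struct fake_dev { const char *name; };

struct fake_crypto_req {
	void (*user_cb)(struct fake_dev *dev, void *req, int err);
	void *user_arg;
};

struct fake_cipher_req { const char *label; };

/* Type-aware completion: it knows the real type behind the void pointer. */
static void fake_cipher_complete(struct fake_dev *dev, void *cc_req, int err)
{
	struct fake_cipher_req *req = cc_req;

	if (err == -EINPROGRESS) {
		/* Backlog notification only: nothing to unmap or free yet. */
		printf("%s: %s moved out of backlog\n", dev->name, req->label);
		return;
	}
	printf("%s: %s completed, err=%d\n", dev->name, req->label, err);
}

int main(void)
{
	struct fake_dev dev = { .name = "ccree0" };
	struct fake_cipher_req cipher = { .label = "skcipher request" };
	struct fake_crypto_req creq = {
		.user_cb = fake_cipher_complete,
		.user_arg = &cipher,
	};

	/* What the backlog processor does when draining the backlog ... */
	creq.user_cb(&dev, creq.user_arg, -EINPROGRESS);
	/* ... and what the completion path does when the request finishes. */
	creq.user_cb(&dev, creq.user_arg, 0);
	return 0;
}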