aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRomain Perier <romain.perier@free-electrons.com>2016-12-14 09:15:07 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2016-12-16 06:59:39 -0500
commit8759fec4af222f338d08f8f1a7ad6a77ca6cb301 (patch)
tree095109417f57fdbc6b2558e827ca8a5bb3f73128
parent18e615ad87bce9125ef3990377a4a946ec0f21f3 (diff)
crypto: marvell - Copy IVDIG before launching partial DMA ahash requests
Currently, inner IV/DIGEST data are only copied once into the hash engines and not set explicitly before launching a request that is not a first frag. This is an issue especially when multiple ahash reqs are computed in parallel or chained with a cipher request, as the state of the request being computed is not updated into the hash engine. It leads to non-deterministic corrupted digest results. Fixes: commit 2786cee8e50b ("crypto: marvell - Move SRAM I/O operations to step functions") Signed-off-by: Romain Perier <romain.perier@free-electrons.com> Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com> Cc: <stable@vger.kernel.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--drivers/crypto/marvell/cesa.h3
-rw-r--r--drivers/crypto/marvell/hash.c34
-rw-r--r--drivers/crypto/marvell/tdma.c9
3 files changed, 43 insertions, 3 deletions
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index a768da7138a1..b7872f62f674 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
273#define CESA_TDMA_SRC_IN_SRAM BIT(30) 273#define CESA_TDMA_SRC_IN_SRAM BIT(30)
274#define CESA_TDMA_END_OF_REQ BIT(29) 274#define CESA_TDMA_END_OF_REQ BIT(29)
275#define CESA_TDMA_BREAK_CHAIN BIT(28) 275#define CESA_TDMA_BREAK_CHAIN BIT(28)
276#define CESA_TDMA_TYPE_MSK GENMASK(27, 0) 276#define CESA_TDMA_SET_STATE BIT(27)
277#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
277#define CESA_TDMA_DUMMY 0 278#define CESA_TDMA_DUMMY 0
278#define CESA_TDMA_DATA 1 279#define CESA_TDMA_DATA 1
279#define CESA_TDMA_OP 2 280#define CESA_TDMA_OP 2
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 2a9260559654..585c90f9f606 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -281,13 +281,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
281 sreq->offset = 0; 281 sreq->offset = 0;
282} 282}
283 283
284static void mv_cesa_ahash_dma_step(struct ahash_request *req)
285{
286 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
287 struct mv_cesa_req *base = &creq->base;
288
289 /* We must explicitly set the digest state. */
290 if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
291 struct mv_cesa_engine *engine = base->engine;
292 int i;
293
294 /* Set the hash state in the IVDIG regs. */
295 for (i = 0; i < ARRAY_SIZE(creq->state); i++)
296 writel_relaxed(creq->state[i], engine->regs +
297 CESA_IVDIG(i));
298 }
299
300 mv_cesa_dma_step(base);
301}
302
284static void mv_cesa_ahash_step(struct crypto_async_request *req) 303static void mv_cesa_ahash_step(struct crypto_async_request *req)
285{ 304{
286 struct ahash_request *ahashreq = ahash_request_cast(req); 305 struct ahash_request *ahashreq = ahash_request_cast(req);
287 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 306 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
288 307
289 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 308 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
290 mv_cesa_dma_step(&creq->base); 309 mv_cesa_ahash_dma_step(ahashreq);
291 else 310 else
292 mv_cesa_ahash_std_step(ahashreq); 311 mv_cesa_ahash_std_step(ahashreq);
293} 312}
@@ -585,12 +604,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
585 struct mv_cesa_ahash_dma_iter iter; 604 struct mv_cesa_ahash_dma_iter iter;
586 struct mv_cesa_op_ctx *op = NULL; 605 struct mv_cesa_op_ctx *op = NULL;
587 unsigned int frag_len; 606 unsigned int frag_len;
607 bool set_state = false;
588 int ret; 608 int ret;
589 u32 type; 609 u32 type;
590 610
591 basereq->chain.first = NULL; 611 basereq->chain.first = NULL;
592 basereq->chain.last = NULL; 612 basereq->chain.last = NULL;
593 613
614 if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
615 set_state = true;
616
594 if (creq->src_nents) { 617 if (creq->src_nents) {
595 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, 618 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
596 DMA_TO_DEVICE); 619 DMA_TO_DEVICE);
@@ -684,6 +707,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
684 if (type != CESA_TDMA_RESULT) 707 if (type != CESA_TDMA_RESULT)
685 basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN; 708 basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
686 709
710 if (set_state) {
711 /*
712 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
713 * let the step logic know that the IVDIG registers should be
714 * explicitly set before launching a TDMA chain.
715 */
716 basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
717 }
718
687 return 0; 719 return 0;
688 720
689err_free_tdma: 721err_free_tdma:
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 4416b88eca70..c76375ff376d 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
109 last->next = dreq->chain.first; 109 last->next = dreq->chain.first;
110 engine->chain.last = dreq->chain.last; 110 engine->chain.last = dreq->chain.last;
111 111
112 if (!(last->flags & CESA_TDMA_BREAK_CHAIN)) 112 /*
113 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
114 * the last element of the current chain, or if the request
115 * being queued needs the IV regs to be set before launching
116 * the request.
117 */
118 if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
119 !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
113 last->next_dma = dreq->chain.first->cur_dma; 120 last->next_dma = dreq->chain.first->cur_dma;
114 } 121 }
115} 122}