author    Dmitry Kasatkin <dmitry.kasatkin@nokia.com>    2010-11-19 09:04:26 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>    2010-11-27 03:37:18 -0500
commit    798eed5d9204b01862985ba0643ce5cf84114072 (patch)
tree      98f3bc95e53863ab850510ebb8a1ebff4ef22686 /drivers
parent    a5d87237bb15eed8449e5a30c0bbe626e0e7f43d (diff)
crypto: omap-sham - crypto_ahash_final() no longer needs to be called
According to Herbert Xu, a client may not always call crypto_ahash_final(). If an error occurs during hash calculation, resources are cleaned up automatically. But if the calculation succeeds and the client never calls crypto_ahash_final(), the internal buffer is never freed and the clocks are never disabled.

This patch adds support for atomic crypto_ahash_update() calls. Clocks are now enabled and disabled per update request, and the data buffer is now allocated as part of the request context; the client is obliged to free it with crypto_free_ahash().

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
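As an illustration of the client behaviour described above, here is a minimal, hypothetical ahash user that issues crypto_ahash_init() and crypto_ahash_update() but never crypto_ahash_final(), then frees the request. The helper name and the "sha1" algorithm choice are assumptions for the sketch, not part of the patch; with this change the driver keeps its buffer and clock usage balanced even on this path.

/*
 * Minimal sketch, not from the patch: an update-only ahash client.
 * For an async tfm, init/update may also return -EINPROGRESS or
 * -EBUSY; that is ignored here for brevity.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sha1_update_only(const u8 *data, unsigned int len)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_ahash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, data, len);    /* data must be linearly mapped */
        ahash_request_set_callback(req, 0, NULL, NULL);
        ahash_request_set_crypt(req, &sg, NULL, len);

        err = crypto_ahash_init(req);
        if (!err)
                err = crypto_ahash_update(req);

        /* no crypto_ahash_final(): the driver must not leak resources */
        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
}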
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/crypto/omap-sham.c    168
1 file changed, 82 insertions(+), 86 deletions(-)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6340c5ef4712..85d627774538 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -89,6 +89,11 @@
 #define OP_UPDATE       1
 #define OP_FINAL        2
 
+#define OMAP_ALIGN_MASK         (sizeof(u32)-1)
+#define OMAP_ALIGNED            __attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN          PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -96,9 +101,8 @@ struct omap_sham_reqctx {
         unsigned long           flags;
         unsigned long           op;
 
-        u8                      digest[SHA1_DIGEST_SIZE];
+        u8                      digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
         size_t                  digcnt;
-        u8                      *buffer;
         size_t                  bufcnt;
         size_t                  buflen;
         dma_addr_t              dma_addr;
@@ -107,6 +111,8 @@ struct omap_sham_reqctx {
         struct scatterlist      *sg;
         unsigned int            offset; /* offset in current sg */
         unsigned int            total;  /* total request */
+
+        u8                      buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
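The trailing buffer[0] member makes the data buffer part of the request context allocation itself: whoever frees the context frees the buffer with it, so there is no separate page to leak. A stand-alone sketch of the idiom, with hypothetical names and the modern flexible-array spelling:

#include <stdlib.h>
#include <string.h>

#define BUFLEN 4096

struct reqctx {
        size_t bufcnt;
        /* trailing storage, allocated together with the struct */
        unsigned char buffer[]; /* C99 spelling of buffer[0] */
};

int main(void)
{
        /* one allocation covers the context and its data buffer */
        struct reqctx *ctx = malloc(sizeof(*ctx) + BUFLEN);

        if (!ctx)
                return 1;
        memcpy(ctx->buffer, "data", 4);
        ctx->bufcnt = 4;
        free(ctx);      /* freeing the context frees the buffer too */
        return 0;
}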
@@ -219,31 +225,33 @@ static void omap_sham_copy_hash(struct ahash_request *req, int out)
         }
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
-                                 int final, int dma)
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
 {
-        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        u32 val = length << 5, mask;
+        clk_enable(dd->iclk);
 
-        if (unlikely(!ctx->digcnt)) {
+        if (!(dd->flags & FLAGS_INIT)) {
+                omap_sham_write_mask(dd, SHA_REG_MASK,
+                        SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
 
-                clk_enable(dd->iclk);
+                if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+                                        SHA_REG_SYSSTATUS_RESETDONE))
+                        return -ETIMEDOUT;
 
-                if (!(dd->flags & FLAGS_INIT)) {
-                        omap_sham_write_mask(dd, SHA_REG_MASK,
-                                SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+                dd->flags |= FLAGS_INIT;
+                dd->err = 0;
+        }
 
-                        if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-                                                SHA_REG_SYSSTATUS_RESETDONE)) {
-                                clk_disable(dd->iclk);
-                                return -ETIMEDOUT;
-                        }
-                        dd->flags |= FLAGS_INIT;
-                        dd->err = 0;
-                }
-        } else {
+        return 0;
+}
+
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+                                 int final, int dma)
+{
+        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+        u32 val = length << 5, mask;
+
+        if (likely(ctx->digcnt))
                 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-        }
 
         omap_sham_write_mask(dd, SHA_REG_MASK,
                 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -263,23 +271,19 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
                 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
         omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-        return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
                               size_t length, int final)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        int err, count, len32;
+        int count, len32;
         const u32 *buffer = (const u32 *)buf;
 
         dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
                                                 ctx->digcnt, length, final);
 
-        err = omap_sham_write_ctrl(dd, length, final, 0);
-        if (err)
-                return err;
+        omap_sham_write_ctrl(dd, length, final, 0);
 
         /* should be non-zero before next lines to disable clocks later */
         ctx->digcnt += length;
@@ -302,14 +306,10 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
                               size_t length, int final)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        int err, len32;
+        int len32;
 
         dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
                                                 ctx->digcnt, length, final);
-        /* flush cache entries related to our page */
-        if (dma_addr == ctx->dma_addr)
-                dma_sync_single_for_device(dd->dev, dma_addr, length,
-                                           DMA_TO_DEVICE);
 
         len32 = DIV_ROUND_UP(length, sizeof(u32));
 
@@ -320,19 +320,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
         omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_addr, 0, 0);
 
-        omap_set_dma_dest_params(dd->dma_lch, 0,
-                        OMAP_DMA_AMODE_CONSTANT,
-                        dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-        omap_set_dma_dest_burst_mode(dd->dma_lch,
-                        OMAP_DMA_DATA_BURST_16);
-
-        omap_set_dma_src_burst_mode(dd->dma_lch,
-                        OMAP_DMA_DATA_BURST_4);
-
-        err = omap_sham_write_ctrl(dd, length, final, 1);
-        if (err)
-                return err;
+        omap_sham_write_ctrl(dd, length, final, 1);
 
         ctx->digcnt += length;
 
@@ -384,6 +372,21 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
         return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+                                        struct omap_sham_reqctx *ctx,
+                                        size_t length, int final)
+{
+        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+                                       DMA_TO_DEVICE);
+        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+                return -EINVAL;
+        }
+
+        /* next call does not fail... so no unmap in the case of error */
+        return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
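The new omap_sham_xmit_dma_map() follows the usual streaming-DMA discipline: map right before the transfer, check the mapping, and leave the unmap to the completion path (omap_sham_update_dma_stop() in this driver). A hypothetical helper showing just that pattern, with dev, buf and len standing in for the driver's fields:

#include <linux/dma-mapping.h>

/*
 * Hypothetical fragment mirroring the map/check discipline above.
 * On mapping failure nothing was mapped, so there is nothing to
 * undo; on success the transfer-complete path does the unmap with
 * dma_unmap_single(dev, *dma, len, DMA_TO_DEVICE).
 */
static int xmit_mapped(struct device *dev, void *buf, size_t len,
                       dma_addr_t *dma)
{
        *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma))
                return -EINVAL;
        return 0;       /* caller now programs and starts the DMA */
}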
@@ -403,7 +406,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
         if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
                 count = ctx->bufcnt;
                 ctx->bufcnt = 0;
-                return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+                return omap_sham_xmit_dma_map(dd, ctx, count, final);
         }
 
         return 0;
@@ -413,7 +416,6 @@ static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
         unsigned int length;
-        int err;
 
         ctx->flags |= FLAGS_FAST;
 
@@ -427,11 +429,8 @@ static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
 
         ctx->total -= length;
 
-        err = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
-        if (err != -EINPROGRESS)
-                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
-
-        return err;
+        /* next call does not fail... so no unmap in the case of error */
+        return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -453,6 +452,9 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
         omap_stop_dma(dd->dma_lch);
         if (ctx->flags & FLAGS_FAST)
                 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+        else
+                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+                                 DMA_TO_DEVICE);
 
         return 0;
 }
@@ -471,18 +473,9 @@ static void omap_sham_cleanup(struct ahash_request *req)
         ctx->flags |= FLAGS_CLEAN;
         spin_unlock_irqrestore(&dd->lock, flags);
 
-        if (ctx->digcnt) {
-                clk_disable(dd->iclk);
+        if (ctx->digcnt)
                 memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ?
                                 SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE);
-        }
-
-        if (ctx->dma_addr)
-                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-                                 DMA_TO_DEVICE);
-
-        if (ctx->buffer)
-                free_page((unsigned long)ctx->buffer);
 
         dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 }
@@ -520,21 +513,7 @@ static int omap_sham_init(struct ahash_request *req)
 
         ctx->bufcnt = 0;
         ctx->digcnt = 0;
-
-        ctx->buflen = PAGE_SIZE;
-        ctx->buffer = (void *)__get_free_page(
-                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                        GFP_KERNEL : GFP_ATOMIC);
-        if (!ctx->buffer)
-                return -ENOMEM;
-
-        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-                                       DMA_TO_DEVICE);
-        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-                free_page((unsigned long)ctx->buffer);
-                return -EINVAL;
-        }
+        ctx->buflen = BUFLEN;
 
         if (tctx->flags & FLAGS_HMAC) {
                 struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -581,7 +560,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
                 use_dma = 0;
 
         if (use_dma)
-                err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+                err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
         else
                 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
@@ -615,6 +594,7 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+        struct omap_sham_dev *dd = ctx->dd;
 
         if (!err) {
                 omap_sham_copy_hash(ctx->dd->req, 1);
@@ -627,7 +607,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
         if ((ctx->flags & FLAGS_FINAL) || err)
                 omap_sham_cleanup(req);
 
-        ctx->dd->flags &= ~FLAGS_BUSY;
+        clk_disable(dd->iclk);
+        dd->flags &= ~FLAGS_BUSY;
 
         if (req->base.complete)
                 req->base.complete(&req->base, err);
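With this hunk the clock lifetime is tied to the request rather than to crypto_ahash_final(): omap_sham_hw_init() enables the interface clock when a request is taken from the queue, and omap_sham_finish_req() disables it when the request completes, whatever kind of request it was. A toy model of that invariant, plain C rather than driver code, names hypothetical:

#include <assert.h>

/*
 * Toy model (not driver code) of the per-request clock balance:
 * hw_init() takes the clock reference, finish_req() drops it,
 * whether or not the client ever issues a final request.
 */
static int clk_refcount;

static void hw_init(void)    { clk_refcount++; }       /* clk_enable */
static void finish_req(void) { clk_refcount--; }       /* clk_disable */

int main(void)
{
        for (int i = 0; i < 3; i++) {   /* three update-only requests */
                hw_init();
                /* ... DMA or PIO transfer happens here ... */
                finish_req();
        }
        assert(clk_refcount == 0);      /* clocks end up disabled */
        return 0;
}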
@@ -636,7 +617,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
                                   struct ahash_request *req)
 {
-        struct crypto_async_request *async_req, *backlog;
+        struct crypto_async_request *async_req, *backlog = 0;
         struct omap_sham_reqctx *ctx;
         struct ahash_request *prev_req;
         unsigned long flags;
@@ -672,7 +653,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
         dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
                                                 ctx->op, req->nbytes);
 
-        if (req != prev_req && ctx->digcnt)
+
+        err = omap_sham_hw_init(dd);
+        if (err)
+                goto err1;
+
+        omap_set_dma_dest_params(dd->dma_lch, 0,
+                        OMAP_DMA_AMODE_CONSTANT,
+                        dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+        omap_set_dma_dest_burst_mode(dd->dma_lch,
+                        OMAP_DMA_DATA_BURST_16);
+
+        omap_set_dma_src_burst_mode(dd->dma_lch,
+                        OMAP_DMA_DATA_BURST_4);
+
+        if (ctx->digcnt)
                 /* request has changed - restore hash */
                 omap_sham_copy_hash(req, 0);
 
@@ -684,7 +680,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
         } else if (ctx->op == OP_FINAL) {
                 err = omap_sham_final_req(dd);
         }
-
+err1:
         if (err != -EINPROGRESS) {
                 /* done_task will not finish it, so do it here */
                 omap_sham_finish_req(req, err);
@@ -868,7 +864,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
         }
 
         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-                                 sizeof(struct omap_sham_reqctx));
+                                 sizeof(struct omap_sham_reqctx) + BUFLEN);
 
         if (alg_base) {
                 struct omap_sham_hmac_ctx *bctx = tctx->base;
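Growing the request size by BUFLEN is what backs the buffer[0] member added earlier: the crypto core allocates reqsize bytes of context behind every request, so ahash_request_ctx() hands the driver a context with BUFLEN usable bytes in line. A hypothetical driver-side fragment:

#include <crypto/internal/hash.h>

/* illustration only: reqsize covers the trailing buffer[0] member,
 * so ctx->buffer addresses BUFLEN bytes owned by this request */
static void touch_ctx_buffer(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        ctx->buffer[0] = 0;     /* first byte of the in-line buffer */
}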
@@ -954,7 +950,7 @@ static struct ahash_alg algs[] = {
                                                 CRYPTO_ALG_NEED_FALLBACK,
                 .cra_blocksize          = SHA1_BLOCK_SIZE,
                 .cra_ctxsize            = sizeof(struct omap_sham_ctx),
-                .cra_alignmask          = 0,
+                .cra_alignmask          = OMAP_ALIGN_MASK,
                 .cra_module             = THIS_MODULE,
                 .cra_init               = omap_sham_cra_init,
                 .cra_exit               = omap_sham_cra_exit,
@@ -978,7 +974,7 @@ static struct ahash_alg algs[] = {
                 .cra_blocksize          = SHA1_BLOCK_SIZE,
                 .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
                                         sizeof(struct omap_sham_hmac_ctx),
-                .cra_alignmask          = 0,
+                .cra_alignmask          = OMAP_ALIGN_MASK,
                 .cra_module             = THIS_MODULE,
                 .cra_init               = omap_sham_cra_sha1_init,
                 .cra_exit               = omap_sham_cra_exit,
@@ -1002,7 +998,7 @@ static struct ahash_alg algs[] = {
                 .cra_blocksize          = SHA1_BLOCK_SIZE,
                 .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
                                         sizeof(struct omap_sham_hmac_ctx),
-                .cra_alignmask          = 0,
+                .cra_alignmask          = OMAP_ALIGN_MASK,
                 .cra_module             = THIS_MODULE,
                 .cra_init               = omap_sham_cra_md5_init,
                 .cra_exit               = omap_sham_cra_exit,
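Finally, .cra_alignmask = OMAP_ALIGN_MASK, i.e. sizeof(u32)-1 = 3, asks the crypto API to present the driver with 4-byte-aligned data, matching the OMAP_ALIGNED attribute on digest and buffer so that u32 register copies and DMA stay aligned. A stand-alone sketch of what the mask tests:

#include <stdint.h>
#include <stdio.h>

#define OMAP_ALIGN_MASK (sizeof(uint32_t) - 1) /* == 3 */

/* a pointer suits the engine if its low two bits are clear,
 * i.e. it is aligned to a 4-byte boundary */
static int is_aligned(const void *p)
{
        return ((uintptr_t)p & OMAP_ALIGN_MASK) == 0;
}

int main(void)
{
        uint32_t word;

        printf("%d\n", is_aligned(&word));      /* prints 1 */
        return 0;
}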