author		Dmitry Kasatkin <dmitry.kasatkin@nokia.com>	2010-11-19 09:04:29 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2010-11-27 03:37:19 -0500
commit		887c883eea9867535059f3c8414c8cfc952ccff1 (patch)
tree		2a35344c0b1de81b5b4a70bfe416c6605c5809c7 /drivers/crypto
parent		a55b290b0e41e02d1969589c5d77d966ac2b7ec8 (diff)
crypto: omap-sham - zero-copy scatterlist handling
If the scatterlist has more than one entry, the current driver copies the data into an aligned buffer before passing it to the accelerator, to work around possible DMA and SHA buffer alignment issues. This commit adds the intelligence to verify scatterlist alignment and, when the entries qualify, to run DMA directly on the data without the copy buffer.

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
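The fast path rests on the two alignment predicates introduced below: a scatterlist entry must start on a 32-bit word boundary (SG_AA), and any entry other than the last must be a whole number of SHA1/MD5 blocks long (SG_SA). A minimal userspace sketch of that dispatch decision, not driver code, with the hypothetical stand-ins sg_offset/sg_length in place of the real struct scatterlist fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHA1_MD5_BLOCK_SIZE	64	/* SHA-1 and MD5 share a 64-byte block */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Mirrors SG_AA(): DMA wants a word-aligned start address. */
static bool sg_addr_aligned(uint32_t sg_offset)
{
	return IS_ALIGNED(sg_offset, sizeof(uint32_t));
}

/* Mirrors SG_SA(): every entry but the last must be block-aligned. */
static bool sg_size_aligned(uint32_t sg_length)
{
	return IS_ALIGNED(sg_length, SHA1_MD5_BLOCK_SIZE);
}

int main(void)
{
	/* 128 bytes at offset 4: word-aligned start, whole blocks. */
	if (sg_addr_aligned(4) && sg_size_aligned(128))
		printf("fast path: DMA directly from the scatterlist\n");
	else
		printf("slow path: copy through the aligned buffer\n");
	return 0;
}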
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/omap-sham.c	87
1 file changed, 61 insertions(+), 26 deletions(-)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d88d7ebfffa7..eb988e7a2fd9 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -74,7 +74,7 @@
 
 #define FLAGS_FINUP		0x0002
 #define FLAGS_FINAL		0x0004
-#define FLAGS_FAST		0x0008
+#define FLAGS_SG		0x0008
 #define FLAGS_SHA1		0x0010
 #define FLAGS_DMA_ACTIVE	0x0020
 #define FLAGS_OUTPUT_READY	0x0040
@@ -393,6 +393,8 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 		return -EINVAL;
 	}
 
+	ctx->flags &= ~FLAGS_SG;
+
 	/* next call does not fail... so no unmap in the case of error */
 	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
 }
@@ -403,9 +405,6 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 	unsigned int final;
 	size_t count;
 
-	if (!ctx->total)
-		return 0;
-
 	omap_sham_append_sg(ctx);
 
 	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -422,25 +421,62 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 	return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int length;
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
+
+	if (!ctx->total)
+		return 0;
+
+	if (ctx->bufcnt || ctx->offset)
+		return omap_sham_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+			ctx->digcnt, ctx->bufcnt, ctx->total);
+
+	sg = ctx->sg;
 
-	ctx->flags |= FLAGS_FAST;
+	if (!SG_AA(sg))
+		return omap_sham_update_dma_slow(dd);
 
-	length = min(ctx->total, sg_dma_len(ctx->sg));
-	ctx->total = length;
+	if (!sg_is_last(sg) && !SG_SA(sg))
+		/* size is not SHA1_BLOCK_SIZE aligned */
+		return omap_sham_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & FLAGS_FINUP)) {
+			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
+			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+			/* without finup() we need one block to close hash */
+			if (!tail)
+				tail = SHA1_MD5_BLOCK_SIZE;
+			length -= tail;
+		}
+	}
 
 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 		dev_err(dd->dev, "dma_map_sg error\n");
 		return -EINVAL;
 	}
 
+	ctx->flags |= FLAGS_SG;
+
 	ctx->total -= length;
+	ctx->offset = length; /* offset where to start slow */
+
+	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
 
 	/* next call does not fail... so no unmap in the case of error */
-	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -460,11 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_FAST)
+	if (ctx->flags & FLAGS_SG) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
-	else
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+	} else {
 		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
 				 DMA_TO_DEVICE);
+	}
 
 	return 0;
 }
@@ -545,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 
 	if (ctx->flags & FLAGS_CPU)
 		err = omap_sham_update_cpu(dd);
-	else if (ctx->flags & FLAGS_FAST)
-		err = omap_sham_update_dma_fast(dd);
 	else
-		err = omap_sham_update_dma_slow(dd);
+		err = omap_sham_update_dma_start(dd);
 
 	/* wait for dma completion before can take more data */
 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -730,18 +770,13 @@ static int omap_sham_update(struct ahash_request *req)
 		 */
 		omap_sham_append_sg(ctx);
 		return 0;
-	} else if (ctx->bufcnt + ctx->total <= 64) {
+	} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+		/*
+		 * faster to use CPU for short transfers
+		 */
 		ctx->flags |= FLAGS_CPU;
-	} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-		/* may be can use faster functions */
-		int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-					sizeof(u32));
-		if (aligned)
-			/* digest: first and final */
-			ctx->flags |= FLAGS_FAST;
 	}
-	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-		/* if not finaup -> not fast */
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
 		return 0;
 	}
@@ -1026,7 +1061,7 @@ static void omap_sham_done_task(unsigned long data)
 		dd->flags &= ~FLAGS_DMA_ACTIVE;
 		omap_sham_update_dma_stop(dd);
 		if (!dd->err)
-			err = omap_sham_update_dma_slow(dd);
+			err = omap_sham_update_dma_start(dd);
 	}
 
 	err = dd->err ? : err;
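
When the last scatterlist entry is transferred without finup(), the patch above holds back a tail so that at least one full block remains to close the hash later. A hedged userspace illustration of that tail arithmetic from omap_sham_update_dma_start(), with trim_tail as a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

#define SHA1_MD5_BLOCK_SIZE 64

/*
 * Mirrors the tail trimming in omap_sham_update_dma_start(): keep back
 * the sub-block remainder, or one full block when the length is already
 * block-aligned, so a later call can still finalize the hash.
 */
static uint32_t trim_tail(uint32_t length)
{
	uint32_t tail = length & (SHA1_MD5_BLOCK_SIZE - 1);

	if (!tail)
		tail = SHA1_MD5_BLOCK_SIZE;
	return length - tail;
}

int main(void)
{
	printf("%u -> %u\n", 200u, trim_tail(200));	/* 200 -> 192, tail 8 */
	printf("%u -> %u\n", 128u, trim_tail(128));	/* 128 -> 64, tail 64 */
	return 0;
}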