Diffstat (limited to 'drivers/crypto/omap-sham.c')
-rw-r--r--  drivers/crypto/omap-sham.c | 180
1 file changed, 90 insertions(+), 90 deletions(-)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ba8f1ea84c5..6399a8f1938 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,17 +72,20 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL	HZ
 
-#define FLAGS_FINUP		0x0002
-#define FLAGS_FINAL		0x0004
-#define FLAGS_SG		0x0008
-#define FLAGS_SHA1		0x0010
-#define FLAGS_DMA_ACTIVE	0x0020
-#define FLAGS_OUTPUT_READY	0x0040
-#define FLAGS_INIT		0x0100
-#define FLAGS_CPU		0x0200
-#define FLAGS_HMAC		0x0400
-#define FLAGS_ERROR		0x0800
-#define FLAGS_BUSY		0x1000
+/* mostly device flags */
+#define FLAGS_BUSY		0
+#define FLAGS_FINAL		1
+#define FLAGS_DMA_ACTIVE	2
+#define FLAGS_OUTPUT_READY	3
+#define FLAGS_INIT		4
+#define FLAGS_CPU		5
+#define FLAGS_DMA_READY		6
+/* context flags */
+#define FLAGS_FINUP		16
+#define FLAGS_SG		17
+#define FLAGS_SHA1		18
+#define FLAGS_HMAC		19
+#define FLAGS_ERROR		20
 
 #define OP_UPDATE	1
 #define OP_FINAL	2
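The hunk above turns the FLAGS_* constants from bit masks into bit numbers: device-wide flags live in the low bits of dd->flags and are handled with the kernel's atomic bitops (set_bit(), test_bit(), and friends), while per-request context flags start at bit 16 and are still combined into ctx->flags with BIT(). A minimal standalone sketch of how the two styles compare (illustration only, not part of the driver; BIT() is redefined here so the snippet builds in user space):

    /* sketch.c - illustration only */
    #include <stdio.h>

    #define BIT(nr)		(1UL << (nr))	/* same expansion as the kernel macro */

    /* device flags, low bits (bit numbers as in the patch) */
    #define FLAGS_BUSY		0
    #define FLAGS_FINAL		1
    /* context flags, from bit 16 */
    #define FLAGS_FINUP		16

    int main(void)
    {
    	unsigned long flags = 0;

    	flags |= BIT(FLAGS_FINUP);	/* old style was: flags |= 0x0002 */

    	if (flags & BIT(FLAGS_FINUP))	/* old style was: flags & 0x0002 */
    		printf("finup requested\n");
    	if (!(flags & BIT(FLAGS_BUSY)))
    		printf("device not busy\n");

    	return 0;
    }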
@@ -144,7 +147,6 @@ struct omap_sham_dev {
 	int			dma;
 	int			dma_lch;
 	struct tasklet_struct	done_task;
-	struct tasklet_struct	queue_task;
 
 	unsigned long		flags;
 	struct crypto_queue	queue;
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
 	if (!hash)
 		return;
 
-	if (likely(ctx->flags & FLAGS_SHA1)) {
+	if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
 		/* SHA1 results are in big endian */
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
 			hash[i] = be32_to_cpu(in[i]);
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
238{ 240{
239 clk_enable(dd->iclk); 241 clk_enable(dd->iclk);
240 242
241 if (!(dd->flags & FLAGS_INIT)) { 243 if (!test_bit(FLAGS_INIT, &dd->flags)) {
242 omap_sham_write_mask(dd, SHA_REG_MASK, 244 omap_sham_write_mask(dd, SHA_REG_MASK,
243 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); 245 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
244 246
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
 				SHA_REG_SYSSTATUS_RESETDONE))
 			return -ETIMEDOUT;
 
-		dd->flags |= FLAGS_INIT;
+		set_bit(FLAGS_INIT, &dd->flags);
 		dd->err = 0;
 	}
 
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 	 * Setting ALGO_CONST only for the first iteration
 	 * and CLOSE_HASH only for the last one.
 	 */
-	if (ctx->flags & FLAGS_SHA1)
+	if (ctx->flags & BIT(FLAGS_SHA1))
 		val |= SHA_REG_CTRL_ALGO;
 	if (!ctx->digcnt)
 		val |= SHA_REG_CTRL_ALGO_CONST;
@@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 		return -ETIMEDOUT;
 
 	if (final)
-		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
+
+	set_bit(FLAGS_CPU, &dd->flags);
 
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
@@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 	ctx->digcnt += length;
 
 	if (final)
-		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 
-	dd->flags |= FLAGS_DMA_ACTIVE;
+	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 
 	omap_start_dma(dd->dma_lch);
 
@@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 		return -EINVAL;
 	}
 
-	ctx->flags &= ~FLAGS_SG;
+	ctx->flags &= ~BIT(FLAGS_SG);
 
 	/* next call does not fail... so no unmap in the case of error */
 	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
@@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 
 	omap_sham_append_sg(ctx);
 
-	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 
 	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
 					ctx->bufcnt, ctx->digcnt, final);
@@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 	length = min(ctx->total, sg->length);
 
 	if (sg_is_last(sg)) {
-		if (!(ctx->flags & FLAGS_FINUP)) {
+		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
 			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
 			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
 			/* without finup() we need one block to close hash */
@@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 			return -EINVAL;
 	}
 
-	ctx->flags |= FLAGS_SG;
+	ctx->flags |= BIT(FLAGS_SG);
 
 	ctx->total -= length;
 	ctx->offset = length; /* offset where to start slow */
 
-	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 
 	/* next call does not fail... so no unmap in the case of error */
 	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
@@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_SG) {
+	if (ctx->flags & BIT(FLAGS_SG)) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 		if (ctx->sg->length == ctx->offset) {
 			ctx->sg = sg_next(ctx->sg);
@@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req)
 			crypto_ahash_digestsize(tfm));
 
 	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
-		ctx->flags |= FLAGS_SHA1;
+		ctx->flags |= BIT(FLAGS_SHA1);
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
 	ctx->buflen = BUFLEN;
 
-	if (tctx->flags & FLAGS_HMAC) {
+	if (tctx->flags & BIT(FLAGS_HMAC)) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
 
 		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
 		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
-		ctx->flags |= FLAGS_HMAC;
+		ctx->flags |= BIT(FLAGS_HMAC);
 	}
 
 	return 0;
@@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 	int err;
 
 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
-		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 
-	if (ctx->flags & FLAGS_CPU)
+	if (ctx->flags & BIT(FLAGS_CPU))
 		err = omap_sham_update_cpu(dd);
 	else
 		err = omap_sham_update_dma_start(dd);
@@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req)
 
 	if (ctx->digcnt) {
 		omap_sham_copy_ready_hash(req);
-		if (ctx->flags & FLAGS_HMAC)
+		if (ctx->flags & BIT(FLAGS_HMAC))
 			err = omap_sham_finish_hmac(req);
 	}
 
@@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 	struct omap_sham_dev *dd = ctx->dd;
 
 	if (!err) {
-		omap_sham_copy_hash(ctx->dd->req, 1);
-		if (ctx->flags & FLAGS_FINAL)
+		omap_sham_copy_hash(req, 1);
+		if (test_bit(FLAGS_FINAL, &dd->flags))
 			err = omap_sham_finish(req);
 	} else {
-		ctx->flags |= FLAGS_ERROR;
+		ctx->flags |= BIT(FLAGS_ERROR);
 	}
 
+	/* atomic operation is not needed here */
+	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
 	clk_disable(dd->iclk);
-	dd->flags &= ~FLAGS_BUSY;
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
+
+	/* handle new request */
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
@@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_sham_reqctx *ctx;
-	struct ahash_request *prev_req;
 	unsigned long flags;
 	int err = 0, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
 		ret = ahash_enqueue_request(&dd->queue, req);
-	if (dd->flags & FLAGS_BUSY) {
+	if (test_bit(FLAGS_BUSY, &dd->flags)) {
 		spin_unlock_irqrestore(&dd->lock, flags);
 		return ret;
 	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
 	if (async_req)
-		dd->flags |= FLAGS_BUSY;
+		set_bit(FLAGS_BUSY, &dd->flags);
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
@@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 		backlog->complete(backlog, -EINPROGRESS);
 
 	req = ahash_request_cast(async_req);
-
-	prev_req = dd->req;
 	dd->req = req;
-
 	ctx = ahash_request_ctx(req);
 
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 						ctx->op, req->nbytes);
 
-
 	err = omap_sham_hw_init(dd);
 	if (err)
 		goto err1;
@@ -712,18 +716,16 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 
 	if (ctx->op == OP_UPDATE) {
 		err = omap_sham_update_req(dd);
-		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
 			/* no final() after finup() */
 			err = omap_sham_final_req(dd);
 	} else if (ctx->op == OP_FINAL) {
 		err = omap_sham_final_req(dd);
 	}
 err1:
-	if (err != -EINPROGRESS) {
+	if (err != -EINPROGRESS)
 		/* done_task will not finish it, so do it here */
 		omap_sham_finish_req(req, err);
-		tasklet_schedule(&dd->queue_task);
-	}
 
 	dev_dbg(dd->dev, "exit, err: %d\n", err);
 
@@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req)
 	ctx->sg = req->src;
 	ctx->offset = 0;
 
-	if (ctx->flags & FLAGS_FINUP) {
+	if (ctx->flags & BIT(FLAGS_FINUP)) {
 		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
 			/*
 			 * OMAP HW accel works only with buffers >= 9
@@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req)
 			/*
 			 * faster to use CPU for short transfers
 			 */
-			ctx->flags |= FLAGS_CPU;
+			ctx->flags |= BIT(FLAGS_CPU);
 		}
 	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
@@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 
-	ctx->flags |= FLAGS_FINUP;
+	ctx->flags |= BIT(FLAGS_FINUP);
 
-	if (ctx->flags & FLAGS_ERROR)
+	if (ctx->flags & BIT(FLAGS_ERROR))
 		return 0; /* uncompleted hash is not needed */
 
 	/* OMAP HW accel works only with buffers >= 9 */
@@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err1, err2;
 
-	ctx->flags |= FLAGS_FINUP;
+	ctx->flags |= BIT(FLAGS_FINUP);
 
 	err1 = omap_sham_update(req);
 	if (err1 == -EINPROGRESS || err1 == -EBUSY)
@@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 
 	if (alg_base) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
-		tctx->flags |= FLAGS_HMAC;
+		tctx->flags |= BIT(FLAGS_HMAC);
 		bctx->shash = crypto_alloc_shash(alg_base, 0,
 						CRYPTO_ALG_NEED_FALLBACK);
 		if (IS_ERR(bctx->shash)) {
@@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
 	crypto_free_shash(tctx->fallback);
 	tctx->fallback = NULL;
 
-	if (tctx->flags & FLAGS_HMAC) {
+	if (tctx->flags & BIT(FLAGS_HMAC)) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
 		crypto_free_shash(bctx->shash);
 	}
@@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = {
 static void omap_sham_done_task(unsigned long data)
 {
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
-	struct ahash_request *req = dd->req;
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int ready = 0, err = 0;
+	int err = 0;
 
-	if (ctx->flags & FLAGS_OUTPUT_READY) {
-		ctx->flags &= ~FLAGS_OUTPUT_READY;
-		ready = 1;
+	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+		omap_sham_handle_queue(dd, NULL);
+		return;
 	}
 
-	if (dd->flags & FLAGS_DMA_ACTIVE) {
-		dd->flags &= ~FLAGS_DMA_ACTIVE;
-		omap_sham_update_dma_stop(dd);
-		if (!dd->err)
+	if (test_bit(FLAGS_CPU, &dd->flags)) {
+		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+			goto finish;
+	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
+		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+			omap_sham_update_dma_stop(dd);
+			if (dd->err) {
+				err = dd->err;
+				goto finish;
+			}
+		}
+		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
+			/* hash or semi-hash ready */
+			clear_bit(FLAGS_DMA_READY, &dd->flags);
 			err = omap_sham_update_dma_start(dd);
+			if (err != -EINPROGRESS)
+				goto finish;
+		}
 	}
 
-	err = dd->err ? : err;
-
-	if (err != -EINPROGRESS && (ready || err)) {
-		dev_dbg(dd->dev, "update done: err: %d\n", err);
-		/* finish curent request */
-		omap_sham_finish_req(req, err);
-		/* start new request */
-		omap_sham_handle_queue(dd, NULL);
-	}
-}
-
-static void omap_sham_queue_task(unsigned long data)
-{
-	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+	return;
 
-	omap_sham_handle_queue(dd, NULL);
+finish:
+	dev_dbg(dd->dev, "update done: err: %d\n", err);
+	/* finish curent request */
+	omap_sham_finish_req(dd->req, err);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
 {
 	struct omap_sham_dev *dd = dev_id;
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-
-	if (!ctx) {
-		dev_err(dd->dev, "unknown interrupt.\n");
-		return IRQ_HANDLED;
-	}
 
-	if (unlikely(ctx->flags & FLAGS_FINAL))
+	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
 		/* final -> allow device to go to power-saving mode */
 		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
 
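The reworked done_task above relies on the kernel's atomic bitops: test_and_clear_bit() reads and clears a flag in a single atomic step, so the tasklet cannot lose a completion that the interrupt handler or the DMA callback signals concurrently. As a rough sketch of the semantics only (illustration, deliberately ignoring the atomicity the real kernel helper guarantees):

    /* non-atomic illustration of test_and_clear_bit(nr, addr) */
    static inline int sketch_test_and_clear_bit(int nr, unsigned long *addr)
    {
    	unsigned long mask = 1UL << nr;
    	int was_set = (*addr & mask) != 0;

    	*addr &= ~mask;	/* the kernel helper performs this read-modify-write atomically */
    	return was_set;
    }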
@@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
 			 SHA_REG_CTRL_OUTPUT_READY);
 	omap_sham_read(dd, SHA_REG_CTRL);
 
-	ctx->flags |= FLAGS_OUTPUT_READY;
-	dd->err = 0;
+	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+		dev_warn(dd->dev, "Interrupt when no active requests.\n");
+		return IRQ_HANDLED;
+	}
+
+	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
 	tasklet_schedule(&dd->done_task);
 
 	return IRQ_HANDLED;
@@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
 		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
 		dd->err = -EIO;
-		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+		clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
 	}
 
+	set_bit(FLAGS_DMA_READY, &dd->flags);
 	tasklet_schedule(&dd->done_task);
 }
 
@@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&dd->list);
 	spin_lock_init(&dd->lock);
 	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
-	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
 	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
 
 	dd->irq = -1;
@@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev)
 	for (i = 0; i < ARRAY_SIZE(algs); i++)
 		crypto_unregister_ahash(&algs[i]);
 	tasklet_kill(&dd->done_task);
-	tasklet_kill(&dd->queue_task);
 	iounmap(dd->io_base);
 	clk_put(dd->iclk);
 	omap_sham_dma_cleanup(dd);